| Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
5,800 | def __parse_names():
filename = get_file()
with io.open(filename, , encoding=) as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split()
chebi_id = int(tokens[1])
if chebi_id not in __ALL_NAMES:
__ALL_NAMES[chebi_id] = []
nme = Name(tokens[4],
tokens[2],
tokens[3],
tokens[5] == ,
tokens[6])
__ALL_NAMES[chebi_id].append(nme) | Gets and parses file |
5,801 | def _get_libvirt_enum_string(prefix, value):
attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)]
prefixes = [_compute_subprefix(p) for p in attributes]
counts = {p: prefixes.count(p) for p in prefixes}
sub_prefixes = [p for p, count in counts.items() if count > 1 or (p.endswith('_') and p[:-1] in prefixes)]
filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes]
for candidate in filtered:
if value == getattr(libvirt, ''.join((prefix, candidate))):
name = candidate.lower().replace('_', ' ')
return name
return 'unknown' | Convert the libvirt enum integer value into a human readable string.
:param prefix: start of the libvirt attribute to look for.
:param value: integer to convert to string |
5,802 | def delay(self, secs):
secs = int(secs)
for i in reversed(range(secs)):
sys.stdout.write("\r")
sys.stdout.write("sleep %ds, left %2ds" % (secs, i+1))
sys.stdout.flush()
time.sleep(1)
sys.stdout.write("\n")
return self | Delay some seconds
Args:
secs: float seconds
Returns:
self |
5,803 | def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
"fix-sample-header": True, "remove-refcalls": True}
a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
if a_intervals:
a_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
ref_call["intervals"] = rm_interval_file
eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
exp = {"sample": data["name"][-1],
"ref": dd.get_ref_file(data),
"approach": "grade",
"calls": [ref_call, eval_call]}
if a_intervals:
exp["intervals"] = os.path.abspath(a_intervals)
if data.get("align_bam"):
exp["align"] = data["align_bam"]
elif data.get("work_bam"):
exp["align"] = data["work_bam"]
return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
"experiments": [exp]} | Create a bcbio.variation configuration input for validation. |
5,804 | def sendmail(self, msg_from, msg_to, msg):
SMTP_dummy.msg_from = msg_from
SMTP_dummy.msg_to = msg_to
SMTP_dummy.msg = msg | Remember the recipients. |
5,805 | def cmd_partition(opts):
config = load_config(opts.config)
b = get_blockade(config, opts)
if opts.random:
if opts.partitions:
raise BlockadeError("Either specify individual partitions "
"or --random, but not both")
b.random_partition()
else:
partitions = []
for partition in opts.partitions:
names = []
for name in partition.split(","):
name = name.strip()
if name:
names.append(name)
partitions.append(names)
if not partitions:
raise BlockadeError("Either specify individual partitions "
"or random")
b.partition(partitions) | Partition the network between containers
Replaces any existing partitions outright. Any containers NOT specified
in arguments will be globbed into a single implicit partition. For
example if you have three containers: c1, c2, and c3 and you run:
blockade partition c1
The result will be a partition with just c1 and another partition with
c2 and c3.
Alternatively, --random may be specified, and zero or more random
partitions will be generated by blockade. |
5,806 | def new_cells(self, name=None, formula=None):
return self._impl.new_cells(name, formula).interface | Create a cells in the space.
Args:
name: If omitted, the model is named automatically ``CellsN``,
where ``N`` is an available number.
func: The function to define the formula of the cells.
Returns:
The new cells. |
5,807 | def _compute_ll_matrix(self, idx, param_vals, num_pts):
if idx >= len(num_pts):
return -1.0 * self.update_hyperparameters(
scipy.asarray(param_vals, dtype=float)
)
else:
vals = scipy.zeros(num_pts[idx:], dtype=float)
for k in xrange(0, len(param_vals[idx])):
specific_param_vals = list(param_vals)
specific_param_vals[idx] = param_vals[idx][k]
vals[k] = self._compute_ll_matrix(
idx + 1,
specific_param_vals,
num_pts
)
return vals | Recursive helper function for compute_ll_matrix.
Parameters
----------
idx : int
The index of the parameter for this layer of the recursion to
work on. `idx` == len(`num_pts`) is the base case that terminates
the recursion.
param_vals : List of :py:class:`Array`
List of arrays of parameter values. Entries in the slots 0:`idx` are
set to scalars by the previous levels of recursion.
num_pts : :py:class:`Array`
The numbers of points for each parameter.
Returns
-------
vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities at lower
levels. |
5,808 | def build(self, X, Y, w=None, edges=None):
super(MorseComplex, self).build(X, Y, w, edges)
if self.debug:
sys.stdout.write("Decomposition: ")
start = time.clock()
morse_complex = MorseComplexFloat(
vectorFloat(self.Xnorm.flatten()),
vectorFloat(self.Y),
str(self.gradient),
str(self.simplification),
vectorFloat(self.w),
self.graph_rep.full_graph(),
self.debug,
)
self.__amc = morse_complex
self.persistences = []
self.merge_sequence = {}
morse_complex_json = json.loads(morse_complex.to_json())
hierarchy = morse_complex_json["Hierarchy"]
for merge in hierarchy:
self.persistences.append(merge["Persistence"])
self.merge_sequence[merge["Dying"]] = (
merge["Persistence"],
merge["Surviving"],
merge["Saddle"],
)
self.persistences = sorted(list(set(self.persistences)))
partitions = morse_complex_json["Partitions"]
self.base_partitions = {}
for i, label in enumerate(partitions):
if label not in self.base_partitions:
self.base_partitions[label] = []
self.base_partitions[label].append(i)
self.max_indices = list(self.base_partitions.keys())
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | Assigns data to this object and builds the Morse-Smale
Complex
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph. |
5,809 | def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
if not is_valid_release_short(short):
raise ValueError("Release short name is not valid: %s" % short)
if not is_valid_release_version(version):
raise ValueError("Release short version is not valid: %s" % version)
if not is_valid_release_type(type):
raise ValueError("Release type is not valid: %s" % type)
if type == "ga":
result = "%s-%s" % (short, version)
else:
result = "%s-%s-%s" % (short, version, type)
if bp_short:
result += "@%s" % create_release_id(bp_short, bp_version, bp_type)
return result | Create release_id from given parts.
:param short: Release short name
:type short: str
:param version: Release version
:type version: str
:param type: Release type
:type type: str
:param bp_short: Base Product short name
:type bp_short: str
:param bp_version: Base Product version
:type bp_version: str
:param bp_type: Base Product type
:rtype: str |
5,810 | def graph_from_seeds(seeds, cell_source):
if hasattr(cell_source, ):
cellmap = cell_source.cellmap
cells = cellmap
G = cell_source.G
for c in seeds:
G.add_node(c)
cellmap[c.address()] = c
else:
cellmap = dict([(x.address(),x) for x in seeds])
cells = cell_source.cells
G = networkx.DiGraph()
for c in cellmap.values(): G.add_node(c)
todo = [s for s in seeds if s.formula]
steps = [i for i,s in enumerate(todo)]
names = cell_source.named_ranges
while todo:
c1 = todo.pop()
step = steps.pop()
cursheet = c1.sheet
pystr, ast = cell2code(c1, names)
c1.python_expression = pystr
for addr in cellmap_add_addresses:
sheet_new, col_new, row_new = split_address(addr)
if sheet_new != sheet_initial:
sheet_initial = sheet_new
max_rows, max_cols = max_dimension(cellmap, sheet_new)
if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:
cell_new = Cell(addr, sheet_new, value="", should_eval=)
cellmap[addr] = cell_new
G.add_node(cell_new)
cell_source.cells[addr] = cell_new
rng = cell_source.Range(reference)
if address in cellmap:
virtual_cell = cellmap[address]
else:
virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )
cellmap[address] = virtual_cell
G.add_node(virtual_cell)
G.add_edge(virtual_cell, c1)
target = virtual_cell
origins = []
if len(list(rng.keys())) != 0:
cell_source.pointers.add(cell.address())
else:
virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )
origins = [virtual_cell]
target = c1
for c2 in flatten(origins):
if c2.address() not in cellmap:
if c2.formula:
todo.append(c2)
steps.append(step+1)
else:
pystr,ast = cell2code(c2, names)
c2.python_expression = pystr
c2.compile()
cellmap[c2.address()] = c2
G.add_node(c2)
if(target != []):
G.add_edge(c2,target)
c1.compile()
return (cellmap, G) | This creates/updates a networkx graph from a list of cells.
The graph is created when the cell_source is an instance of ExcelCompiler
The graph is updated when the cell_source is an instance of Spreadsheet |
5,811 | def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
from scipy.linalg import svd
G = asarray(G, float)
(U, S, V) = svd(G, full_matrices=False, check_finite=False)
ok = S >= epsilon
S = S[ok]
U = U[:, ok]
V = V[ok, :]
return (U, S, V) | r"""Economic Singular Value Decomposition.
Args:
G (array_like): Matrix to be factorized.
epsilon (float): Threshold on the square root of the eigen values.
Default is ``sqrt(finfo(float).eps)``.
Returns:
:class:`numpy.ndarray`: Unitary matrix.
:class:`numpy.ndarray`: Singular values.
:class:`numpy.ndarray`: Unitary matrix.
See Also
--------
numpy.linalg.svd : Singular value decomposition.
scipy.linalg.svd : Singular value decomposition. |
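The truncation step above (dropping singular values below ``sqrt(eps)``) can be checked with a short self-contained sketch that uses scipy directly; this illustrates the same idea rather than calling the packaged function:

```python
import numpy as np
from numpy import sqrt, finfo
from scipy.linalg import svd

# Illustration of the economic/truncated SVD idea from the row above.
G = np.random.RandomState(0).randn(6, 4)
U, S, V = svd(G, full_matrices=False, check_finite=False)
keep = S >= sqrt(finfo(float).eps)      # same default threshold as economic_svd
U, S, V = U[:, keep], S[keep], V[keep, :]
# No singular value of a generic random matrix falls below the threshold,
# so the truncated factors still reconstruct G.
assert np.allclose(G, U @ np.diag(S) @ V)
```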
5,812 | def invoke(self, headers, body):
xml = Service._create_request(headers, body)
try:
response = self.session.post(self.endpoint, verify=False, data=xml)
logging.debug(response.content)
except Exception as e:
traceback.print_exc()
raise WSManException(e)
if response.status_code == 200:
return Service._parse_response(response.content)
if response.status_code == 401:
raise WSManAuthenticationException()
raise WSManException( % response.status_code) | Invokes the soap service |
5,813 | def autohook(ui, repo, hooktype, **kwargs):
cmd = hooktype.replace("-", "_")
if not repo or not cmd.replace("_", "").isalpha():
return False
result = False
trusted = ui.configlist("autohooks", "trusted")
if "" not in trusted:
default_path = ui.config("paths", "default")
if not default_path:
return False
for match in trusted:
if default_path.startswith(match):
break
else:
return False
for hookdir in ("hg-autohooks", ".hg-autohooks"):
dirname = os.path.join(repo.root, hookdir)
if not os.path.exists(dirname):
continue
for leafname in os.listdir(dirname):
if not leafname.startswith(cmd + "."):
continue
filename = os.path.join(dirname, leafname)
result = _runhook(ui, repo, hooktype, filename, kwargs) or result
return result | Look for hooks inside the repository to run. |
5,814 | def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
maxlen=None, importrate=None):
command = ["QSCAN", cursor]
if count:
command += ["COUNT", count]
if busyloop:
command += ["BUSYLOOP"]
if minlen:
command += ["MINLEN", minlen]
if maxlen:
command += ["MAXLEN", maxlen]
if importrate:
command += ["IMPORTRATE", importrate]
return self.execute_command(*command) | Iterate all the existing queues in the local node.
:param count: A hint about how much work to do per iteration.
:param busyloop: Block and return all the elements in a busy loop.
:param minlen: Don't return elements with less than count jobs queued.
:param maxlen: Don't return elements with more than count jobs queued.
:param importrate: Only return elements with an job import rate
(from other nodes) >= rate. |
5,815 | def filter(self, filter_func, reverse=False):
new_log_file = Log()
new_log_file.logfile = self.logfile
new_log_file.total_lines = 0
new_log_file._valid_lines = []
new_log_file._invalid_lines = self._invalid_lines[:]
if not reverse:
for i in self._valid_lines:
if filter_func(i):
new_log_file.total_lines += 1
new_log_file._valid_lines.append(i)
else:
for i in self._valid_lines:
if not filter_func(i):
new_log_file.total_lines += 1
new_log_file._valid_lines.append(i)
return new_log_file | Filter current log lines by a given filter function.
This allows to drill down data out of the log file by filtering the
relevant log lines to analyze.
For example, filter by a given IP so only log lines for that IP are
further processed with commands (top paths, http status counter...).
:param filter_func: [required] Filter method, see filters.py for all
available filters.
:type filter_func: function
:param reverse: negate the filter (so accept all log lines that return
``False``).
:type reverse: boolean
:returns: a new instance of Log containing only log lines
that passed the filter function.
:rtype: :class:`Log` |
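The filter/reverse contract described above can be illustrated with a tiny stand-alone sketch; the list of strings below is a made-up stand-in for parsed log lines, not the library's own data model:

```python
# Toy illustration of the filter(filter_func, reverse=...) pattern above.
lines = ["GET /a 200", "GET /b 500", "POST /a 200"]

def filter_lines(entries, filter_func, reverse=False):
    """Keep entries matching the predicate, or the complement when reverse=True."""
    if not reverse:
        return [e for e in entries if filter_func(e)]
    return [e for e in entries if not filter_func(e)]

is_error = lambda e: e.endswith("500")
assert filter_lines(lines, is_error) == ["GET /b 500"]
assert filter_lines(lines, is_error, reverse=True) == ["GET /a 200", "POST /a 200"]
```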
5,816 | def _store_outputs_in_object_store(self, object_ids, outputs):
for i in range(len(object_ids)):
if isinstance(outputs[i], ray.actor.ActorHandle):
raise Exception("Returning an actor handle from a remote "
"function is not allowed).")
if outputs[i] is ray.experimental.no_return.NoReturn:
if not self.plasma_client.contains(
pyarrow.plasma.ObjectID(object_ids[i].binary())):
raise RuntimeError(
"Attempting to return "
"from a remote function, but the corresponding "
"ObjectID does not exist in the local object store.")
else:
self.put_object(object_ids[i], outputs[i]) | Store the outputs of a remote function in the local object store.
This stores the values that were returned by a remote function in the
local object store. If any of the return values are object IDs, then
these object IDs are aliased with the object IDs that the scheduler
assigned for the return values. This is called by the worker that
executes the remote function.
Note:
The arguments object_ids and outputs should have the same length.
Args:
object_ids (List[ObjectID]): The object IDs that were assigned to
the outputs of the remote function call.
outputs (Tuple): The value returned by the remote function. If the
remote function was supposed to only return one value, then its
output was wrapped in a tuple with one element prior to being
passed into this function. |
5,817 | def _fix_attribute_names(attrs, change_map):
new_attr = {}
for k in attrs.keys():
if k in change_map:
new_attr[change_map[k]] = attrs[k]
else:
new_attr[k] = attrs[k]
return new_attr | Change attribute names as per values in change_map dictionary.
Parameters
----------
:param attrs : dict Dict of operator attributes
:param change_map : dict Dict of onnx attribute name to mxnet attribute names.
Returns
-------
:return new_attr : dict Converted dict of operator attributes. |
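A stand-alone sketch of the same key-renaming pattern (the attribute names here are invented for the example):

```python
# Rename dict keys according to a change_map, leaving unmapped keys untouched --
# the same behaviour as _fix_attribute_names above, written as a comprehension.
attrs = {"kernel_shape": (3, 3), "pads": (1, 1), "stride": (1, 1)}
change_map = {"kernel_shape": "kernel", "pads": "pad"}
new_attr = {change_map.get(k, k): v for k, v in attrs.items()}
assert new_attr == {"kernel": (3, 3), "pad": (1, 1), "stride": (1, 1)}
```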
5,818 | def walkWords(self, showPadding: bool=False):
wIndex = 0
lastEnd = self.startBitAddr
parts = []
for p in self.parts:
end = p.startOfPart
if showPadding and end != lastEnd:
while end != lastEnd:
assert end >= lastEnd, (end, lastEnd)
endOfWord = ceil(
(lastEnd + 1) / self.wordWidth) * self.wordWidth
endOfPadding = min(endOfWord, end)
_p = TransPart(self, None, lastEnd, endOfPadding, 0)
parts.append(_p)
if endOfPadding >= endOfWord:
yield (wIndex, parts)
wIndex += 1
parts = []
lastEnd = endOfPadding
if self._wordIndx(lastEnd) != self._wordIndx(p.startOfPart):
yield (wIndex, parts)
wIndex += 1
parts = []
lastEnd = p.endOfPart
parts.append(p)
lastEnd = p.endOfPart
if lastEnd % self.wordWidth == 0:
yield (wIndex, parts)
wIndex += 1
parts = []
if showPadding and (parts
or lastEnd != self.endBitAddr
or lastEnd % self.wordWidth != 0):
end = ceil(self.endBitAddr / self.wordWidth) * self.wordWidth
while end != lastEnd:
assert end >= lastEnd, (end, lastEnd)
endOfWord = ((lastEnd // self.wordWidth) + 1) * self.wordWidth
endOfPadding = min(endOfWord, end)
_p = TransPart(self, None, lastEnd, endOfPadding, 0)
_p.parent = self
parts.append(_p)
if endOfPadding >= endOfWord:
yield (wIndex, parts)
wIndex += 1
parts = []
lastEnd = endOfPadding
if parts:
yield (wIndex, parts) | Walk enumerated words in this frame
:attention: not all indexes has to be present, only words
with items will be generated when not showPadding
:param showPadding: padding TransParts are also present
:return: generator of tuples (wordIndex, list of TransParts
in this word) |
5,819 | def get_base_path() -> Path:
env_data_path = os.environ.get("EFB_DATA_PATH", None)
if env_data_path:
base_path = Path(env_data_path).resolve()
else:
base_path = Path.home() / ".ehforwarderbot"
if not base_path.exists():
base_path.mkdir(parents=True)
return base_path | Get the base data path for EFB. This can be defined by the
environment variable ``EFB_DATA_PATH``.
If ``EFB_DATA_PATH`` is not defined, this gives
``~/.ehforwarderbot``.
This method creates the queried path if not existing.
Returns:
The base path. |
5,820 | def commit_confirmed(name):
confirmed = {
'name': name,
'result': None,
'changes': {},
'comment': ''
}
if __opts__['test']:
confirmed[] = .format(name)
return confirmed
ret = __salt__[](name)
confirmed.update(ret)
return confirmed | .. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
State SLS Example:
.. code-block:: yaml
'20180726083540640360':
netconfig.commit_confirmed |
5,821 | def describe_ring(self, keyspace):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_describe_ring(keyspace)
return d | get the token ring: a map of ranges to host addresses,
represented as a set of TokenRange instead of a map from range
to list of endpoints, because you can't use Thrift structs as
map keys:
https://issues.apache.org/jira/browse/THRIFT-162
for the same reason, we can't return a set here, even though
order is neither important nor predictable.
Parameters:
- keyspace |
5,822 | def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True,
normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[],
maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None):
assert maxTweets < 5000, "we can analyze at most 5000 tweets"
params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText,
"useIdfNormalization": useIdfNormalization, "normalization": normalization,
"maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks,
"maxConcepts": maxConcepts, "maxCategories": maxCategories }
if notifyEmailAddress:
params["notifyEmailAddress"] = notifyEmailAddress
if len(ignoreConceptTypes) > 0:
params["ignoreConceptTypes"] = ignoreConceptTypes
return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params) | create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param maxTweets: the maximum number of tweets to collect for the user to analyze
@param notifyEmailAddress: when finished, should we send a notification email to this address? |
5,823 | def get_slab_stats(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
s.send("stats slabs\n")
try:
data = ""
while True:
data += s.recv(4096)
if data.endswith("END\r\n"):
break
return data
finally:
s.close() | Retrieve slab stats from memcached. |
5,824 | def makeB(self, buses=None, branches=None, method="XB"):
buses = self.connected_buses if buses is None else buses
branches = self.online_branches if branches is None else branches
B_buses = copy.deepcopy(buses)
Bp_branches = copy.deepcopy(branches)
Bpp_branches = copy.deepcopy(branches)
for bus in B_buses:
bus.b_shunt = 0.0
for branch in Bp_branches:
branch.b = 0.0
branch.ratio = 1.0
if method == "XB":
branch.r = 0.0
Yp, _, _ = self.getYbus(B_buses, Bp_branches)
for branch in Bpp_branches:
branch.phase_shift = 0.0
if method == "BX":
branch.r = 0.0
Ypp, _, _ = self.getYbus(B_buses, Bpp_branches)
del B_buses
del Bp_branches
return -Yp.imag, -Ypp.imag | Based on makeB.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@param method: Specify "XB" or "BX" method.
@type method: string
@rtype: tuple
@return: Two matrices, B prime and B double prime, used in the fast
decoupled power flow solver. |
5,825 | def user(context, mail):
LOG.info("Running scout delete user")
adapter = context.obj[]
user_obj = adapter.user(mail)
if not user_obj:
LOG.warning("User {0} could not be found in database".format(mail))
else:
adapter.delete_user(mail) | Delete a user from the database |
5,826 | def newTextChild(self, ns, name, content):
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewTextChild(self._o, ns__o, name, content)
if ret is None:raise treeError()
__tmp = xmlNode(_obj=ret)
return __tmp | Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child TEXT node will be created containing the string
@content. NOTE: Use xmlNewChild() if @content will contain
entities that need to be preserved. Use this function,
xmlNewTextChild(), if you need to ensure that reserved XML
chars that might appear in @content, such as the ampersand,
greater-than or less-than signs, are automatically replaced
by their XML escaped entity representations. |
5,827 | def color_grid(data, palette, denom=9.0, mask_zeros=True):
grid = []
try:
float(data[0][0])
palette = matplotlib.colors.LinearSegmentedColormap.from_list(
"color_grid", palette)
palette.set_bad(alpha=0)
except:
pass
for row in range(len(data)):
grid.append([])
for col in range(len(data[row])):
try:
rgb = color_array_by_value(data[row][col], palette, denom,
mask_zeros)
except:
rgb = color_array_by_hue_mix(data[row][col], palette)
grid[row].append(rgb)
return grid | Convert the given data (2d array of numbers or binary strings) to a 2d
array of RGB or RGBA values which can then be visualized as a heat map.
Arguments:
data - 2d array of numbers or binary strings
palette - a seaborn palette (list of RGB values) indicating how to convert
data to colors. Will be converted to a continuous colormap if
necessary. This should generally be the length of the longest
binary string or the highest possible number
denom - if the data is composed of numbers rather than binary strings,
this number will indicate how to normalize the data to [0, 1]
should it be neccessary.
mask_zeros - Boolean indicating whether 0s should be colored white rather
than the color specified by the palette. -1s always yield
-1 so that missing data can be handled appropriately. |
5,828 | def _filter_namespaces_by_route_whitelist(self):
assert self._routes is not None, "Missing route whitelist"
assert in self._routes
assert in self._routes
route_whitelist = {}
for namespace_name, route_reprs in self._routes[].items():
new_route_reprs = []
if route_reprs == []:
namespace = self.api.namespaces[namespace_name]
new_route_reprs = [route.name_with_version() for route in namespace.routes]
else:
for route_repr in route_reprs:
route_name, version = parse_route_name_and_version(route_repr)
if version > 1:
new_route_reprs.append(.format(route_name, version))
else:
new_route_reprs.append(route_name)
route_whitelist[namespace_name] = new_route_reprs
route_data_types = []
for namespace_name, route_reprs in route_whitelist.items():
if namespace_name not in self.api.namespaces:
raise AssertionError( % namespace_name)
namespace = self.api.namespaces[namespace_name]
if namespace.doc is not None:
route_data_types.extend(
parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name))
data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name]
route_data_types.append(data_type)
output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types)
for namespace in self.api.namespaces.values():
data_types = list(set(output_types_by_ns[namespace.name]))
namespace.data_types = data_types
namespace.data_type_by_name = {d.name: d for d in data_types}
output_route_reprs = [output_route.name_with_version()
for output_route in output_routes_by_ns[namespace.name]]
if namespace.name in route_whitelist:
whitelisted_route_reprs = route_whitelist[namespace.name]
route_reprs = list(set(whitelisted_route_reprs + output_route_reprs))
else:
route_reprs = output_route_reprs
routes = []
for route_repr in route_reprs:
route_name, version = parse_route_name_and_version(route_repr)
route = namespace.routes_by_name[route_name].at_version[version]
routes.append(route)
namespace.routes = []
namespace.route_by_name = {}
namespace.routes_by_name = {}
for route in routes:
namespace.add_route(route) | Given a parsed API in IR form, filter the user-defined datatypes
so that they include only the route datatypes and their direct dependencies. |
5,829 | def onchange(self, new_value):
self.disable_refresh()
self.set_value(new_value)
self.enable_refresh()
return (new_value, ) | Called when the user changes the TextInput content.
With single_line=True it fires in case of focus lost and Enter key pressed.
With single_line=False it fires at each key released.
Args:
new_value (str): the new string content of the TextInput. |
5,830 | def _RemoveForemanRule(self):
if data_store.RelationalDBEnabled():
data_store.REL_DB.RemoveForemanRule(hunt_id=self.session_id.Basename())
return
with aff4.FACTORY.Open(
"aff4:/foreman", mode="rw", token=self.token) as foreman:
aff4_rules = foreman.Get(foreman.Schema.RULES)
aff4_rules = foreman.Schema.RULES(
[r for r in aff4_rules if r.hunt_id != self.session_id])
foreman.Set(aff4_rules) | Removes the foreman rule corresponding to this hunt. |
5,831 | def get_editor_nodes(self, editor, node=None):
return [editor_node for editor_node in self.list_editor_nodes(node) if editor_node.editor == editor] | Returns the :class:`umbra.components.factory.script_editor.nodes.EditorNode` class Nodes with given editor.
:param node: Node to start walking from.
:type node: AbstractNode or AbstractCompositeNode or Object
:param editor: Editor.
:type editor: Editor
:return: EditorNode nodes.
:rtype: list |
5,832 | def send_sms(message, from_number, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None):
connection = connection or get_sms_connection(username=auth_user, password=auth_password, fail_silently=fail_silently)
mail = SMSMessage(message, from_number, recipient_list, connection=connection)
return mail.send() | Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
https://github.com/django/django/blob/master/django/core/mail/__init__.py#L40 |
5,833 | def require_Gtk(min_version=2):
if not _in_X:
raise RuntimeError()
if _has_Gtk < min_version:
raise RuntimeError()
if _has_Gtk == 2:
logging.getLogger(__name__).warn(
_("Missing runtime dependency GTK 3. Falling back to GTK 2 "
"for password prompt"))
from gi.repository import Gtk
if not Gtk.init_check(None)[0]:
raise RuntimeError(_("X server not connected!"))
return Gtk | Make sure Gtk is properly initialized.
:raises RuntimeError: if Gtk can not be properly initialized |
5,834 | def walk(self, where="/"):
_tables()
self._check_if_open()
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, 'pandas_type', None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, 'pandas_type', None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip('/'), groups, leaves) | Walk the pytables group hierarchy for pandas objects
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, optional
Group where to start walking.
If not supplied, the root group is used.
Yields
------
path : str
Full path to a group (without trailing '/')
groups : list of str
names of the groups contained in `path`
leaves : list of str
names of the pandas objects contained in `path` |
5,835 | def rename_table(dbconn, original, new):
cur = dbconn.cursor()
cur.execute("ALTER TABLE {original} RENAME TO {new}".format(original=original, new=new)) | Rename a table in the database
:param dbconn: database connection
:param original: original table name
:param new: new table name |
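A minimal end-to-end check of the ``ALTER TABLE ... RENAME TO`` statement built above, using an in-memory SQLite database (the table names are made up for the example):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE old_events (id INTEGER)")
# Equivalent to rename_table(conn, "old_events", "events") above.
conn.execute("ALTER TABLE old_events RENAME TO events")
tables = [row[0] for row in conn.execute(
    "SELECT name FROM sqlite_master WHERE type='table'")]
assert tables == ["events"]
```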
5,836 | def _get_reference(self):
super()._get_reference()
self.cubeA_body_id = self.sim.model.body_name2id("cubeA")
self.cubeB_body_id = self.sim.model.body_name2id("cubeB")
self.l_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
]
self.r_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
]
self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA")
self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB") | Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data. |
5,837 | def _post(self, url, data=None):
url = urljoin(self.base_url, url)
try:
r = self._make_request(**dict(
method='POST',
url=url,
json=data,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code >= 400:
try:
error_data = r.json()
except ValueError:
error_data = { "response": r }
raise MailChimpError(error_data)
if r.status_code == 204:
return None
return r.json() | Handle authenticated POST requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:param data: The request body parameters
:type data: :py:data:`none` or :py:class:`dict`
:returns: The JSON output from the API or an error message |
5,838 | def get_last_day_of_month(t: datetime) -> int:
tn = t + timedelta(days=32)
tn = datetime(year=tn.year, month=tn.month, day=1)
tt = tn - timedelta(hours=1)
return tt.day | Returns day number of the last day of the month
:param t: datetime
:return: int |
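The add-32-days trick used above can be verified inline for a leap-year February:

```python
from datetime import datetime, timedelta

# Jump past the end of the month, snap to the 1st of the next month,
# then step back one hour -- landing on the last day of the original month.
t = datetime(2020, 2, 10)
tn = t + timedelta(days=32)                      # lands in March 2020
first_of_next = datetime(year=tn.year, month=tn.month, day=1)
assert (first_of_next - timedelta(hours=1)).day == 29   # 2020 is a leap year
```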
5,839 | def forward(self, input_tensor):
ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
if self.inplace:
input_tensor *= dropout_mask.unsqueeze(1)
return None
else:
return dropout_mask.unsqueeze(1) * input_tensor | Apply dropout to input tensor.
Parameters
----------
input_tensor: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``
Returns
-------
output: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied. |
5,840 | def path_shift(self, count=1):
if count == 0: return
pathlist = self.path.strip('/').split('/')
scriptlist = self.environ.get('SCRIPT_NAME', '').strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if count > 0 and count <= len(pathlist):
moved = pathlist[:count]
scriptlist = scriptlist + moved
pathlist = pathlist[count:]
elif count < 0 and count >= -len(scriptlist):
moved = scriptlist[count:]
pathlist = moved + pathlist
scriptlist = scriptlist[:count]
else:
empty = 'SCRIPT_NAME' if count < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
self['PATH_INFO'] = self.path = '/' + '/'.join(pathlist) \
+ ('/' if self.path.endswith('/') and pathlist else '')
self['SCRIPT_NAME'] = '/' + '/'.join(scriptlist)
return '/'.join(moved) | Shift some levels of PATH_INFO into SCRIPT_NAME and return the
moved part. count defaults to 1 |
5,841 | def show(ctx):
for app_name, app in ctx.obj[][].items():
click.echo(click.style(app_name, fg=, bold=True))
for migration in app[]:
applied = ctx.obj[].is_migration_applied(app_name, migration)
click.echo(.format(migration, click.style(, bold=True) if applied else )) | Show migrations list |
5,842 | def average(self, projection=None):
length = self.size()
if projection:
return sum(self.map(projection)) / length
else:
return sum(self) / length | Takes the average of elements in the sequence
>>> seq([1, 2]).average()
1.5
>>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1])
1.5
:param projection: function to project on the sequence before taking the average
:return: average of elements in the sequence |
5,843 | def capture_ratio(self, benchmark, threshold=0.0, compare_op=("ge", "lt")):
if isinstance(compare_op, (tuple, list)):
op1, op2 = compare_op
else:
op1, op2 = compare_op, compare_op
uc = self.up_capture(
benchmark=benchmark, threshold=threshold, compare_op=op1
)
dc = self.down_capture(
benchmark=benchmark, threshold=threshold, compare_op=op2
)
return uc / dc | Capture ratio--ratio of upside to downside capture.
Upside capture ratio divided by the downside capture ratio.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is greater than/less than `threshold`.
compare_op : {tuple, str, list}, default ('ge', 'lt')
Comparison operator used to compare to `threshold`.
If a sequence, the two elements are passed to
`self.up_capture()` and `self.down_capture()`, respectively.
If `str`, indicates the comparison operater used in
both method calls.
Returns
-------
float |
5,844 | def _extract_info_from_package(dependency,
extract_type=None,
debug=False,
include_build_requirements=False
):
output_folder = tempfile.mkdtemp(prefix="pythonpackage-metafolder-")
try:
extract_metainfo_files_from_package(
dependency, output_folder, debug=debug
)
with open(os.path.join(output_folder, "METADATA"),
"r", encoding="utf-8"
) as f:
metadata_entries = f.read().partition("\n\n")[0].splitlines()
if extract_type == "name":
name = None
for meta_entry in metadata_entries:
if meta_entry.lower().startswith("name:"):
return meta_entry.partition(":")[2].strip()
if name is None:
raise ValueError("failed to obtain package name")
return name
elif extract_type == "dependencies":
requirements = []
if os.path.exists(os.path.join(output_folder,
"pyproject.toml")
) and include_build_requirements:
with open(os.path.join(output_folder, "pyproject.toml")) as f:
build_sys = pytoml.load(f)['build-system']
if "requires" in build_sys:
requirements += build_sys["requires"]
requirements += [
entry.rpartition("Requires-Dist:")[2].strip()
for entry in metadata_entries
if entry.startswith("Requires-Dist")
]
return list(set(requirements))
finally:
shutil.rmtree(output_folder) | Internal function to extract metainfo from a package.
Currently supported info types:
- name
- dependencies (a list of dependencies) |
5,845 | def peek(self):
if self.reading is None:
raise StreamEmptyError("peek called on virtual stream walker without any data", selector=self.selector)
return self.reading | Peek at the oldest reading in this virtual stream. |
5,846 | def create_db_entry(self, tfi):
if tfi.task.department.assetflag:
comment = self.asset_comment_pte.toPlainText()
else:
comment = self.shot_comment_pte.toPlainText()
return tfi.create_db_entry(comment) | Create a db entry for the given task file info
:param tfi: the info for a TaskFile entry in the db
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: the created taskfile and note
:rtype: tuple
:raises: ValidationError |
5,847 | def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType=, metaExtension=,
services={: , : , : }):
_meta.json
metaPaths = []
paths = getPaths(urlOrPaths)
for path in paths:
if outDir is None:
metaPath = path + metaExtension
else:
metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension)
log.info( % metaPath)
with open(metaPath, , ) as f:
f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar, \
responseMimeType, services)[1] + u"\n")
metaPaths.append(metaPath)
return metaPaths | Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'.
:param option:
:param urlOrPaths:
:param outDir:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param metaExtension:
:param services:
:return: |
5,848 | def response_hook(self, response, **kwargs) -> HTMLResponse:
if not response.encoding:
response.encoding = DEFAULT_ENCODING
return HTMLResponse._from_response(response, self) | Change response enconding and replace it by a HTMLResponse. |
5,849 | def graceful_stop(self, signal_number=None, stack_frame=None):
stop_msg = "Hard" if self.shutdown else "Graceful"
if signal_number is None:
self.log.info("%s stop called manually. "
"Shutting down.", stop_msg)
else:
self.log.info("%s stop called by signal
"Stack Frame: %s",
stop_msg, signal_number, stack_frame)
self.shutdown = True
self.crawler_list.stop()
self.daemon_list.stop()
self.thread_event.set()
return True | This function will be called when a graceful-stop is initiated. |
5,850 | def _decode_attributes(self):
try:
from_jid = self._element.get('from')
if from_jid:
self._from_jid = JID(from_jid)
to_jid = self._element.get('to')
if to_jid:
self._to_jid = JID(to_jid)
except ValueError:
raise JIDMalformedProtocolError
self._stanza_type = self._element.get('type')
self._stanza_id = self._element.get('id')
lang = self._element.get(XML_LANG_QNAME)
if lang:
self._language = lang | Decode attributes of the stanza XML element
and put them into the stanza properties. |
5,851 | def get_language_from_json(language, key):
file_name = os.path.join(
os.path.dirname(__file__),
,
).format(key.lower())
if os.path.exists(file_name):
try:
with open(file_name, , encoding=) as fh:
languages = json.loads(fh.read())
if languages.get(language.lower()):
return languages[language.lower()]
except:
log.traceback(logging.DEBUG)
return None | Finds the given language in a json file. |
5,852 | def submit_import(cls, volume, location, project=None, name=None,
overwrite=False, properties=None, parent=None,
preserve_folder_structure=True, api=None):
data = {}
volume = Transform.to_volume(volume)
if project and parent:
raise SbgError(
)
elif project:
project = Transform.to_project(project)
destination = {
: project
}
elif parent:
parent = Transform.to_file(parent)
destination = {
: parent
}
else:
raise SbgError()
source = {
: volume,
: location
}
if name:
destination[] = name
data[] = source
data[] = destination
data[] = overwrite
if not preserve_folder_structure:
data[] = preserve_folder_structure
if properties:
data[] = properties
api = api if api else cls._API
extra = {
: cls.__name__,
: data
}
logger.info(, extra=extra)
_import = api.post(cls._URL[], data=data).json()
return Import(api=api, **_import) | Submits new import job.
:param volume: Volume identifier.
:param location: Volume location.
:param project: Project identifier.
:param name: Optional file name.
:param overwrite: If true it will overwrite file if exists.
:param properties: Properties dictionary.
:param parent: The ID of the target folder to which the item should be
imported. Should not be used together with project.
:param preserve_folder_structure: Whether to keep the exact source
folder structure. The default value is true if the item being
imported is a folder. Should not be used if you are importing
a file.
:param api: Api instance.
:return: Import object. |
5,853 | def _control_transfer(self, data):
LOGGER.debug(, data)
self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09,
wValue=0x0200, wIndex=0x01, data_or_wLength=data, timeout=TIMEOUT) | Send device a control request with standard parameters and <data> as
payload. |
5,854 | def process(self, request_adu):
validate_crc(request_adu)
return super(RTUServer, self).process(request_adu) | Process request ADU and return response.
:param request_adu: A bytearray containing the ADU request.
:return: A bytearray containing the response of the ADU request. |
5,855 | def __onclick(self, event):
if event.inaxes == self.ax1:
self.x_coord = event.xdata
self.y_coord = event.ydata
self.__reset_crosshair()
x, y = self.__map2img(self.x_coord, self.y_coord)
subset_vertical = self.__read_timeseries(x, y)
if not self.checkbox.value:
self.__init_vertical_plot()
label = .format(x, y)
self.ax2.plot(self.timestamps, subset_vertical, label=label)
self.ax2_legend = self.ax2.legend(loc=0, prop={: 7}, markerscale=1) | respond to mouse clicks in the plot.
This function responds to clicks on the first (horizontal slice) plot and updates the vertical profile and
slice plots
Parameters
----------
event: matplotlib.backend_bases.MouseEvent
the click event object containing image coordinates |
5,856 | def create_portable_topology(topol, struct, **kwargs):
_topoldir, _topol = os.path.split(topol)
processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_' + _topol))
grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs)
mdp_kwargs = add_mdp_includes(topol, mdp_kwargs)
with tempfile.NamedTemporaryFile(suffix='.mdp') as mdp:
mdp.write(.format(**mdp_kwargs))
mdp.flush()
grompp_kwargs['p'] = topol
grompp_kwargs['pp'] = processed
grompp_kwargs['f'] = mdp.name
grompp_kwargs['c'] = struct
grompp_kwargs['v'] = False
try:
gromacs.grompp(**grompp_kwargs)
finally:
utilities.unlink_gmx(, )
return utilities.realpath(processed) | Create a processed topology.
The processed (or portable) topology file does not contain any
``#include`` statements and hence can be easily copied around. It
also makes it possible to re-grompp without having any special itp
files available.
:Arguments:
*topol*
topology file
*struct*
coordinat (structure) file
:Keywords:
*processed*
name of the new topology file; if not set then it is named like
*topol* but with ``pp_`` prepended
*includes*
path or list of paths of directories in which itp files are
searched for
*grompp_kwargs**
other options for :program:`grompp` such as ``maxwarn=2`` can
also be supplied
:Returns: full path to the processed topology |
5,857 | def _validate_timeout(seconds: float):
val = int(seconds * 1000)
assert 60000 <= val <= 4294967294, "Bad value: {}".format(val)
return val | Creates an int from 60000 to 4294967294 that represents a
valid millisecond wireless LAN timeout |
5,858 | def isa_to_graph(isa: ISA) -> nx.Graph:
return nx.from_edgelist(e.targets for e in isa.edges if not e.dead) | Construct a NetworkX qubit topology from an ISA object.
This discards information about supported gates.
:param isa: The ISA. |
5,859 | def _markup(p_todo, p_focus):
pri = p_todo.priority()
pri = + pri if pri else PaletteItem.DEFAULT
if not p_focus:
attr_dict = {None: pri}
else:
attr_dict = {None: pri + }
attr_dict[PaletteItem.PROJECT] = PaletteItem.PROJECT_FOCUS
attr_dict[PaletteItem.CONTEXT] = PaletteItem.CONTEXT_FOCUS
attr_dict[PaletteItem.METADATA] = PaletteItem.METADATA_FOCUS
attr_dict[PaletteItem.LINK] = PaletteItem.LINK_FOCUS
return attr_dict | Returns an attribute spec for the colors that correspond to the given todo
item. |
5,860 | def build_request(self, path, query_parameters):
url = + self.sanitise_path(path)
url += + urlencode(query_parameters)
return url | Build the HTTP request by adding query parameters to the path.
:param path: API endpoint/path to be used.
:param query_parameters: Query parameters to be added to the request.
:return: string |
5,861 | def refine_rectwv_coeff(input_image, rectwv_coeff,
refine_wavecalib_mode,
minimum_slitlet_width_mm,
maximum_slitlet_width_mm,
save_intermediate_results=False,
debugplot=0):
logger = logging.getLogger(__name__)
if save_intermediate_results:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages()
else:
pdf = None
main_header = input_image[0].header
filter_name = main_header[]
grism_name = main_header[]
if refine_wavecalib_mode not in [1, 2, 11, 12]:
logger.error(. format(
refine_wavecalib_mode
))
raise ValueError("Invalid wavelength calibration refinement mode")
if refine_wavecalib_mode in [1, 2]:
if grism_name == :
catlines_file =
else:
catlines_file =
dumdata = pkgutil.get_data(, catlines_file)
arc_lines_tmpfile = StringIO(dumdata.decode())
catlines = np.genfromtxt(arc_lines_tmpfile)
catlines_all_wave = catlines[:, 0]
catlines_all_flux = catlines[:, 1]
mode = refine_wavecalib_mode
elif refine_wavecalib_mode in [11, 12]:
dumdata = pkgutil.get_data(
,
)
oh_lines_tmpfile = StringIO(dumdata.decode())
catlines = np.genfromtxt(oh_lines_tmpfile)
catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0]))
catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2]))
mode = refine_wavecalib_mode - 10
else:
raise ValueError(.format(refine_wavecalib_mode))
refined_rectwv_coeff = deepcopy(rectwv_coeff)
logger.info()
sp_median = median_slitlets_rectified(
input_image,
mode=2,
minimum_slitlet_width_mm=minimum_slitlet_width_mm,
maximum_slitlet_width_mm=maximum_slitlet_width_mm
)[0].data
sp_median /= sp_median.max()
jmin, jmax = find_pix_borders(sp_median, 0)
naxis1 = main_header[]
naxis2 = main_header[]
crpix1 = main_header[]
crval1 = main_header[]
cdelt1 = main_header[]
xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
if grism_name == :
wv_parameters = set_wv_parameters(filter_name, grism_name)
wave_min = wv_parameters[]
wave_max = wv_parameters[]
else:
wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
logger.info(.format(wave_min))
logger.info(.format(wave_max))
lok1 = catlines_all_wave >= wave_min
lok2 = catlines_all_wave <= wave_max
catlines_reference_wave = catlines_all_wave[lok1*lok2]
catlines_reference_flux = catlines_all_flux[lok1*lok2]
catlines_reference_flux /= catlines_reference_flux.max()
csu_config = CsuConfiguration.define_from_header(main_header)
list_useful_slitlets = csu_config.widths_in_range_mm(
minwidth=minimum_slitlet_width_mm,
maxwidth=maximum_slitlet_width_mm
)
if len(refined_rectwv_coeff.missing_slitlets) > 0:
for iremove in refined_rectwv_coeff.missing_slitlets:
if iremove in list_useful_slitlets:
list_useful_slitlets.remove(iremove)
list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1))
if i not in list_useful_slitlets]
logger.info(.format(
list_useful_slitlets))
logger.info(.format(
list_not_useful_slitlets))
tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet)
for islitlet in list_useful_slitlets])
widths_summary = summary(tempwidths)
logger.info()
logger.info(.format(widths_summary[]))
logger.info(.format(widths_summary[]))
logger.info(.format(widths_summary[]))
logger.info(.format(widths_summary[]))
logger.info(.format(widths_summary[]))
sigma_broadening = cdelt1 * widths_summary[]
xwave_reference, sp_reference = convolve_comb_lines(
catlines_reference_wave, catlines_reference_flux, sigma_broadening,
crpix1, crval1, cdelt1, naxis1
)
sp_reference /= sp_reference.max()
image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header)
expected_cat_image = fits.HDUList([hdu])
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xwave, sp_median, ,
xlabel=,
ylabel=,
title=,
label=, show=False)
ax.stem(catlines_reference_wave, catlines_reference_flux, ,
markerfmt=, basefmt=, label=)
ax.plot(xwave_reference, sp_reference, ,
label=)
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
baseline = np.percentile(sp_median[sp_median > 0], q=10)
if (abs(debugplot) % 10 != 0) or (pdf is not None):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp_median, bins=1000, log=True)
ax.set_xlabel()
ax.set_ylabel()
ax.set_title()
ax.axvline(float(baseline), linestyle=, color=)
if pdf is not None:
pdf.savefig()
else:
geometry = (0, 0, 640, 480)
set_window_geometry(geometry)
plt.show()
lok = np.where(sp_median > 0)
sp_median[lok] -= baseline
logger.info()
global_offset, fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=sp_median,
fminmax=None,
naround_zero=50,
plottitle=,
pdf=pdf,
debugplot=debugplot
)
logger.info(.format(-global_offset))
missing_slitlets = rectwv_coeff.missing_slitlets
if mode == 1:
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet not in missing_slitlets:
i = islitlet - 1
dumdict = refined_rectwv_coeff.contents[i]
dumdict[][0] -= global_offset*cdelt1
elif mode == 2:
logger.info()
median_55sp = median_slitlets_rectified(input_image, mode=1)
offset_array = np.zeros(EMIR_NBARS)
xplot = []
yplot = []
xplot_skipped = []
yplot_skipped = []
cout =
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet in list_useful_slitlets:
i = islitlet - 1
sp_median = median_55sp[0].data[i, :]
lok = np.where(sp_median > 0)
if np.any(lok):
baseline = np.percentile(sp_median[lok], q=10)
sp_median[lok] -= baseline
sp_median /= sp_median.max()
offset_array[i], fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=median_55sp[0].data[i, :],
fminmax=None,
naround_zero=50,
plottitle=.format(
islitlet),
pdf=pdf,
debugplot=debugplot
)
else:
offset_array[i] = 0.0
dumdict = refined_rectwv_coeff.contents[i]
dumdict[][0] -= offset_array[i]*cdelt1
xplot.append(islitlet)
yplot.append(-offset_array[i])
wpoly_coeff_refined = check_wlcalib_sp(
sp=median_55sp[0].data[i, :],
crpix1=crpix1,
crval1=crval1-offset_array[i]*cdelt1,
cdelt1=cdelt1,
wv_master=catlines_reference_wave,
coeff_ini=dumdict[],
naxis1_ini=EMIR_NAXIS1,
title=.format(
islitlet),
ylogscale=False,
pdf=pdf,
debugplot=debugplot
)
dumdict[] = wpoly_coeff_refined
cout +=
else:
xplot_skipped.append(islitlet)
yplot_skipped.append(0)
cout +=
if islitlet % 10 == 0:
if cout != :
cout = str(islitlet // 10)
logger.info(cout)
stat_summary = summary(np.array(yplot))
logger.info()
logger.info(.format(stat_summary[]))
logger.info(.format(stat_summary[]))
logger.info(.format(stat_summary[]))
logger.info(.format(stat_summary[]))
logger.info(.format(stat_summary[
]))
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xplot, yplot,
linestyle=, marker=, color=,
xlabel=,
ylabel=,
title=,
show=False, **{: })
if len(xplot_skipped) > 0:
ax.plot(xplot_skipped, yplot_skipped, )
ax.axhline(-global_offset, linestyle=, color=,
label=)
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
else:
raise ValueError(.format(mode))
if pdf is not None:
pdf.close()
return refined_rectwv_coeff, expected_cat_image | Refine RectWaveCoeff object using a catalogue of lines
One and only one among refine_with_oh_lines_mode and
refine_with_arc_lines must be different from zero.
Parameters
----------
input_image : HDUList object
Input 2D image.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
refine_wavecalib_mode : int
Integer, indicating the type of refinement:
0 : no refinement
1 : apply the same global offset to all the slitlets (using ARC lines)
2 : apply individual offset to each slitlet (using ARC lines)
11 : apply the same global offset to all the slitlets (using OH lines)
12 : apply individual offset to each slitlet (using OH lines)
minimum_slitlet_width_mm : float
Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
Maximum slitlet width (mm) for a valid slitlet.
save_intermediate_results : bool
If True, save plots in PDF files
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
refined_rectwv_coeff : RectWaveCoeff instance
Refined rectification and wavelength calibration coefficients
for the particular CSU configuration.
expected_cat_image : HDUList object
Output 2D image with the expected catalogued lines. |
5,862 | def _register_socket(s):
queue_to_app = Queue()
with _lock:
ApplicationLayer._to_app[s._sockid()] = queue_to_app
return ApplicationLayer._from_app, queue_to_app | Internal method used by socket emulation layer to create a new "upward"
queue for an app-layer socket and to register the socket object.
Returns two queues: "downward" (fromapp) and "upward" (toapp). |
5,863 | def add_bgp_speaker_to_dragent(self, bgp_dragent, body):
return self.post((self.agent_path + self.BGP_DRINSTANCES)
% bgp_dragent, body=body) | Adds a BGP speaker to Dynamic Routing agent. |
5,864 | def _handle_blacklisted_tag(self):
strip = lambda text: text.rstrip().lower()
while True:
this, next = self._read(), self._read(1)
if this is self.END:
self._fail_route()
elif this == "<" and next == "/":
self._head += 3
if self._read() != ">" or (strip(self._read(-1)) !=
strip(self._stack[1].text)):
self._head -= 1
self._emit_text("</")
continue
self._emit(tokens.TagOpenClose())
self._emit_text(self._read(-1))
self._emit(tokens.TagCloseClose())
return self._pop()
elif this == "&":
self._parse_entity()
else:
self._emit_text(this)
self._head += 1 | Handle the body of an HTML tag that is parser-blacklisted. |
5,865 | def open_file(link, session=None, stream=True):
if not isinstance(link, six.string_types):
try:
link = link.url_without_fragment
except AttributeError:
raise ValueError("Cannot parse url from unkown type: {0!r}".format(link))
if not is_valid_url(link) and os.path.exists(link):
link = path_to_url(link)
if is_file_url(link):
local_path = url_to_path(link)
if os.path.isdir(local_path):
raise ValueError("Cannot open directory for read: {}".format(link))
else:
with io.open(local_path, "rb") as local_file:
yield local_file
else:
headers = {"Accept-Encoding": "identity"}
if not session:
from requests import Session
session = Session()
with session.get(link, headers=headers, stream=stream) as resp:
try:
raw = getattr(resp, "raw", None)
result = raw if raw else resp
yield result
finally:
if raw:
conn = getattr(raw, "_connection")
if conn is not None:
conn.close()
result.close() | Open local or remote file for reading.
:type link: pip._internal.index.Link or str
:type session: requests.Session
:param bool stream: Try to stream if remote, default True
:raises ValueError: If link points to a local directory.
:return: a context manager to the opened file-like object |
5,866 | def iter_sections(self, order=Tree.ipreorder, neurite_order=NeuriteIter.FileOrder):
return iter_sections(self, iterator_type=order, neurite_order=neurite_order) | iteration over section nodes
Parameters:
order: section iteration order within a given neurite. Must be one of:
Tree.ipreorder: Depth-first pre-order iteration of tree nodes
Tree.ipreorder: Depth-first post-order iteration of tree nodes
Tree.iupstream: Iterate from a tree node to the root nodes
Tree.ibifurcation_point: Iterator to bifurcation points
Tree.ileaf: Iterator to all leaves of a tree
neurite_order: order upon which neurites should be iterated. Values:
- NeuriteIter.FileOrder: order of appearance in the file
- NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical |
5,867 | def if_else(self, pred, likely=None):
bb = self.basic_block
bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if'))
bbelse = self.append_basic_block(name=_label_suffix(bb.name, '.else'))
bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif'))
br = self.cbranch(pred, bbif, bbelse)
if likely is not None:
br.set_weights([99, 1] if likely else [1, 99])
then = self._branch_helper(bbif, bbend)
otherwise = self._branch_helper(bbelse, bbend)
yield then, otherwise
self.position_at_end(bbend) | A context manager which sets up two conditional basic blocks based
on the given predicate (a i1 value).
A tuple of context managers is yield'ed. Each context manager
acts as a if_then() block.
*likely* has the same meaning as in if_then().
Typical use::
with builder.if_else(pred) as (then, otherwise):
with then:
# emit instructions for when the predicate is true
with otherwise:
# emit instructions for when the predicate is false |
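A hedged, runnable usage example of this context manager through llvmlite's public ir API (requires the llvmlite package; the function being built here is made up):
from llvmlite import ir

mod = ir.Module(name="demo")
i32 = ir.IntType(32)
fn = ir.Function(mod, ir.FunctionType(i32, [i32]), name="clamp_to_zero")
builder = ir.IRBuilder(fn.append_basic_block(name="entry"))
(x,) = fn.args
result = builder.alloca(i32, name="result")
pred = builder.icmp_signed("<", x, ir.Constant(i32, 0))
with builder.if_else(pred, likely=False) as (then, otherwise):
    with then:                        # predicate true: negative input, clamp
        builder.store(ir.Constant(i32, 0), result)
    with otherwise:                   # predicate false: keep the input
        builder.store(x, result)
builder.ret(builder.load(result))     # builder is now positioned in the endif block
print(mod)                            # dumps the generated LLVM IR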
5,868 | def sha256(content):
if isinstance(content, str):
content = content.encode()
return hashlib.sha256(content).hexdigest() | Finds the sha256 hash of the content. |
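A quick check of the behaviour this helper relies on, using hashlib directly:
import hashlib

# str input is encoded to bytes first, so both forms hash identically
assert hashlib.sha256("hello".encode()).hexdigest() == hashlib.sha256(b"hello").hexdigest()
print(hashlib.sha256(b"hello").hexdigest()[:16])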
5,869 | def internal_only(view_func):
@functools.wraps(view_func)
def wrapper(request, *args, **kwargs):
forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")
if len(forwards) > 1:
raise PermissionDenied()
return view_func(request, *args, **kwargs)
return wrapper | A view decorator which blocks access for requests coming through the load balancer. |
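A standalone illustration of the header check the decorator performs (no Django needed; the header values are made up):
def came_through_load_balancer(meta):
    # X-Forwarded-For gains one entry per proxy hop; more than one entry
    # means the request passed through the load balancer and is rejected.
    forwards = meta.get("HTTP_X_FORWARDED_FOR", "").split(",")
    return len(forwards) > 1

print(came_through_load_balancer({"HTTP_X_FORWARDED_FOR": "10.0.0.5"}))                # False -> allowed
print(came_through_load_balancer({"HTTP_X_FORWARDED_FOR": "203.0.113.7, 10.0.0.5"}))   # True  -> blocked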
5,870 | def time_relaxations_direct(P, p0, obs, times=[1]):
n_t = len(times)
times = np.sort(times)
use_diagonalization = times[-1] > P.shape[0]
if use_diagonalization:
R, D, L = rdl_decomposition(P)
if not np.any(np.iscomplex(R)):
R = np.real(R)
if not np.any(np.iscomplex(D)):
D = np.real(D)
if not np.any(np.iscomplex(L)):
L = np.real(L)
rdl = (R, D, L)
f = np.empty(n_t, dtype=D.dtype)
for i in range(n_t):
f[i] = time_relaxation_direct_by_diagonalization(P, p0, obs, times[i], rdl)
else:
f = np.empty(n_t, dtype=float)
start_values = None
for i in range(n_t):
f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(P, p0, obs, times[i], start_values, True)
return f | r"""Compute time-relaxations of obs with respect of given initial distribution.
relaxation(k) = p0 P^k obs
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
p0 : ndarray, shape=(n)
initial distribution
obs : ndarray, shape=(n)
Vector representing observable on discrete states.
times : array-like, shape(n_t)
Vector of time points at which the (auto)correlation will be evaluated
Returns
-------
relaxations : ndarray, shape(n_t) |
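A direct numpy check of relaxation(k) = p0 P^k obs on a two-state chain, reproducing what this function computes without the eigendecomposition fast path:
import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
p0 = np.array([1.0, 0.0])        # start fully in state 0
obs = np.array([0.0, 1.0])       # observable: indicator of state 1
for k in (1, 5, 50):
    relaxation = p0 @ np.linalg.matrix_power(P, k) @ obs
    print(k, relaxation)          # approaches the stationary expectation of about 1/3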
5,871 | def _get_timit(directory):
if os.path.exists(os.path.join(directory, "timit")):
return
assert FLAGS.timit_paths
for path in FLAGS.timit_paths.split(","):
with tf.gfile.GFile(path) as f:
with tarfile.open(fileobj=f, mode="r:gz") as timit_compressed:
timit_compressed.extractall(directory) | Extract TIMIT datasets to directory unless directory/timit exists. |
5,872 | def run_command(self, args):
parsed_args = self.parser.parse_args(args)
if hasattr(parsed_args, 'func'):
parsed_args.func(parsed_args)
else:
self.parser.print_help() | Parse command line arguments and run function registered for the appropriate command.
:param args: [str] command line arguments |
5,873 | def get_list_database(self):
url = "db"
response = self.request(
url=url,
method='GET',
expected_response_code=200
)
return response.json() | Get the list of databases. |
5,874 | def _descendants(self):
children = self._children
if children is not None:
for child in children.values():
yield from child._descendants
yield child | Scans full list of node descendants.
:return: Generator of nodes. |
5,875 | def update(self):
self.t += 1000 / INTERVAL
self.average *= np.random.lognormal(0, 0.04)
high = self.average * np.exp(np.abs(np.random.gamma(1, 0.03)))
low = self.average / np.exp(np.abs(np.random.gamma(1, 0.03)))
delta = high - low
open = low + delta * np.random.uniform(0.05, 0.95)
close = low + delta * np.random.uniform(0.05, 0.95)
color = "darkgreen" if open < close else "darkred"
# NOTE: the dictionary key names below are assumed; the original string literals were lost in extraction
for k, point in [('time', self.t),
('average', self.average),
('open', open),
('high', high),
('low', low),
('close', close),
('color', color)]:
self.data[k].append(point)
ema12 = self._ema(self.data['close'], self.kernel12)
ema26 = self._ema(self.data['close'], self.kernel26)
macd = ema12 - ema26
self.data['ma'].append(ema12)
self.data['macd'].append(macd)
macd9 = self._ema(self.data['macd'], self.kernel9)
self.data['macd9'].append(macd9)
self.data['macdh'].append(macd - macd9) | Compute the next element in the stream, and update the plot data |
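The same EMA/MACD bookkeeping can be sketched with pandas' ewm on a synthetic price series; this is a substitute for the convolution kernels used in the snippet, not the snippet's own code:
import numpy as np
import pandas as pd

prices = pd.Series(100.0 * np.exp(np.cumsum(np.random.normal(0, 0.01, 500))))
ema12 = prices.ewm(span=12, adjust=False).mean()
ema26 = prices.ewm(span=26, adjust=False).mean()
macd = ema12 - ema26                             # MACD line
macd9 = macd.ewm(span=9, adjust=False).mean()    # signal line
histogram = macd - macd9                         # MACD histogram
print(histogram.tail())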
5,876 | def UpdateUser(self, user, ssh_keys):
if not bool(USER_REGEX.match(user)):
self.logger.warning('Invalid user account name %s.', user)
return False
if not self._GetUser(user):
if not (self._AddUser(user)
and self._UpdateUserGroups(user, self.groups)):
return False
if not self._UpdateSudoer(user, sudoer=True):
return False
# NOTE: the authorized-keys update below is reconstructed; the original try block was lost in extraction
try:
self._UpdateAuthorizedKeys(user, ssh_keys)
except (IOError, OSError) as e:
message = 'Could not update the authorized keys file for user %s. %s.'
self.logger.warning(message, user, str(e))
return False
else:
return True | Update a Linux user with authorized SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Returns:
bool, True if the user account updated successfully. |
5,877 | def remove_point(self, time):
if self.tier_type != 'TextTier':
raise Exception('Tiertype must be TextTier.')
self.intervals = [i for i in self.intervals if i[0] != time] | Remove a point, if no point is found nothing happens.
:param int time: Time of the point.
:raises TierTypeException: If the tier is not a TextTier. |
5,878 | def shutdown(self, reason = ConnectionClosed()):
if self._shutdown:
raise ShutdownError()
self.stop()
self._closing = True
for connection in self.connections:
connection.close()
self.connections = set()
self._shutdown = True
if isinstance(reason, ConnectionClosed):
logger.info("server shutdown")
else:
logger.warn("server shutdown, reason %s" % str(reason)) | Shutdown the socket server.
The socket server will stop accepting incoming connections.
All connections will be dropped. |
5,879 | def connect(args):
p = OptionParser(connect.__doc__)
p.add_option("--clip", default=2000, type="int",
help="Only consider end of contigs [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, blastfile = args
clip = opts.clip
sizes = Sizes(fastafile).mapping
blast = Blast(blastfile)
blasts = []
for b in blast:
seqid = b.subject
size = sizes[seqid]
start, end = b.sstart, b.sstop
cstart, cend = min(size, clip), max(0, size - clip)
if start > cstart and end < cend:
continue
blasts.append(b)
key = lambda x: x.query
blasts.sort(key=key)
g = BiGraph()
for query, bb in groupby(blasts, key=key):
bb = sorted(bb, key=lambda x: x.qstart)
nsubjects = len(set(x.subject for x in bb))
if nsubjects == 1:
continue
print("\n".join(str(x) for x in bb))
for a, b in pairwise(bb):
astart, astop = a.qstart, a.qstop
bstart, bstop = b.qstart, b.qstop
if a.subject == b.subject:
continue
arange = astart, astop
brange = bstart, bstop
ov = range_intersect(arange, brange)
alen = astop - astart + 1
blen = bstop - bstart + 1
if ov:
ostart, ostop = ov
ov = ostop - ostart + 1
print(ov, alen, blen)
if ov and (ov > alen / 2 or ov > blen / 2):
print("Too much overlap ({0})".format(ov))
continue
asub = a.subject
bsub = b.subject
atag = ">" if a.orientation == "+" else "<"
btag = ">" if b.orientation == "+" else "<"
g.add_edge(asub, bsub, atag, btag)
graph_to_agp(g, blastfile, fastafile, verbose=False) | %prog connect assembly.fasta read_mapping.blast
Connect contigs using long reads. |
5,880 | def competition_view_leaderboard(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.competition_view_leaderboard_with_http_info(id, **kwargs)
else:
(data) = self.competition_view_leaderboard_with_http_info(id, **kwargs)
return data | View competition leaderboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.competition_view_leaderboard(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Competition name (required)
:return: Result
If the method is called asynchronously,
returns the request thread. |
5,881 | def matchmaker_delete(institute_id, case_name):
user_obj = store.user(current_user.email)
# NOTE: string literals and dict keys below are reconstructed; the originals were stripped in extraction
if 'mme_submitter' not in user_obj['roles']:
flash('unauthorized request', 'warning')
return redirect(request.referrer)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
mme_base_url = current_app.config.get('MME_URL')
mme_token = current_app.config.get('MME_TOKEN')
if not mme_base_url or not mme_token:
flash('An error occurred reading MatchMaker connection parameters. Please check the config file!', 'danger')
return redirect(request.referrer)
delete_result = controllers.mme_delete(case_obj, mme_base_url, mme_token)
n_deleted = 0
category = 'warning'
for resp in delete_result:
if resp['status_code'] == 200:
n_deleted += 1
else:
flash(resp['message'], category)
if n_deleted:
category = 'success'
user_obj = store.user(current_user.email)
store.case_mme_delete(case_obj=case_obj, user_obj=user_obj)
flash('Removed {} patients from MatchMaker out of {}'.format(n_deleted, len(delete_result)), category)
return redirect(request.referrer) | Remove a case from MatchMaker |
5,882 | def plot_periodicvar_recovery_results(
precvar_results,
aliases_count_as_recovered=None,
magbins=None,
periodbins=None,
amplitudebins=None,
ndetbins=None,
minbinsize=1,
plotfile_ext='png',
):
# NOTE: the bulk of this function body was garbled during extraction. The lost
# section handles the aliases_count_as_recovered options, loads the recovery
# results dict/pickle, bins the actual periodic variables by magnitude, period,
# amplitude and ndet, computes recovery fractions per magcol, per period-finder,
# per vartype and per alias type, writes the corresponding recfrac-* plots into
# `recplotdir` (periodic-variable-recovery-plots under simbasedir), and collects
# everything into `outdict`. Only the recoverable head and tail are kept below.
simbasedir = precvar['simbasedir']
lcinfof = os.path.join(simbasedir, 'fakelcs-info.pkl')
if not os.path.exists(lcinfof):
LOGERROR('fakelcs-info.pkl does not exist in %s, cannot continue' % simbasedir)
return None
# ... (binning, recovery-fraction computation, and plot generation elided) ...
# NOTE: dict keys, axis labels and file names below are reconstructed; the
# original string literals were stripped in extraction.
notvariable_recovered_periods = np.concatenate([
precvar['details'][x]['recovery_periods']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
notvariable_recovered_lspvals = np.concatenate([
precvar['details'][x]['recovery_lspvals']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
sortind = np.argsort(notvariable_recovered_periods)
notvariable_recovered_periods = notvariable_recovered_periods[sortind]
notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]
outdict['notvariable_recovered_periods'] = notvariable_recovered_periods
outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals
fig = plt.figure(figsize=(6.4*1.5, 4.8*1.5))
plt.plot(notvariable_recovered_periods,
notvariable_recovered_lspvals,
ms=1.0, linestyle='none', marker='.')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('recovered periodogram peak values')
plt.title('periodogram peak values vs. periods for non-variable objects')
plt.savefig(
os.path.join(recplotdir,
'notvariable-recovered-periods-lspvals.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close()
fig = plt.figure(figsize=(6.4*1.5, 4.8*1.5))
plt.hist(notvariable_recovered_periods, bins=np.arange(0.02, 300.0, 1.0e-3),
histtype='step')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('number of non-variable objects')
plt.title('histogram of periods recovered for non-variable objects')
plt.savefig(
os.path.join(recplotdir,
'notvariable-recovered-periods-hist.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close()
outfile = os.path.join(simbasedir, 'periodicvar-recovery-plot-results.pkl')
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict | This plots the results of periodic var recovery.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how
many elements per bin are required to accept a bin in processing its
recovery characteristics for mags, periods, amplitudes, and ndets.
Parameters
----------
precvar_results : dict or str
This is either a dict returned by parallel_periodicvar_recovery or the
pickle created by that function.
aliases_count_as_recovered : list of str or 'all'
This is used to set which kinds of aliases this function considers as
'recovered' objects. Normally, we require that recovered objects have a
recovery status of 'actual' to indicate the actual period was
recovered. To change this default behavior, aliases_count_as_recovered
can be set to a list of alias status strings that should be considered
as 'recovered' objects as well. Choose from the following alias types::
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set `aliases_count_as_recovered='all'` to include all of the above in
the 'recovered' periodic var list.
magbins : np.array
The magnitude bins to plot the recovery rate results over. If None, the
default mag bins will be used: `np.arange(8.0,16.25,0.25)`.
periodbins : np.array
The period bins to plot the recovery rate results over. If None, the
default period bins will be used: `np.arange(0.0,500.0,0.5)`.
amplitudebins : np.array
The variability amplitude bins to plot the recovery rate results
over. If None, the default amplitude bins will be used:
`np.arange(0.0,2.0,0.05)`.
ndetbins : np.array
The ndet bins to plot the recovery rate results over. If None, the
default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.
minbinsize : int
The minimum number of objects per bin required to plot a bin and its
recovery fraction on the plot.
plotfile_ext : {'png','pdf'}
Sets the plot output files' extension.
Returns
-------
dict
A dict containing recovery fraction statistics and the paths to each of
the plots made. |
5,883 | def getDescendantsUIDs(self, all_descendants=False):
descendants = self.getDescendants(all_descendants=all_descendants)
return map(api.get_uid, descendants) | Returns the UIDs of the descendant Analysis Requests
This method is used as metadata |
5,884 | def get_installation_order(self, req_set):
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._discovered_dependencies[req.name]:
schedule(dep)
order.append(req)
for install_req in req_set.requirements.values():
schedule(install_req)
return order | Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees. |
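A minimal sketch of the same depth-first scheduling idea on a plain dict of dependencies (requirement -> requirements that must be installed first); the input data is made up:
def installation_order(dependencies):
    order, seen = [], set()

    def schedule(name):
        if name in seen:          # marking before recursing also breaks cycles
            return
        seen.add(name)
        for dep in dependencies.get(name, []):
            schedule(dep)         # dependencies land before the requirer
        order.append(name)

    for name in dependencies:
        schedule(name)
    return order

print(installation_order({"app": ["requests"], "requests": ["urllib3", "idna"]}))
# -> ['urllib3', 'idna', 'requests', 'app']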
5,885 | def _X509__asn1date_to_datetime(asn1date):
bio = Membio()
libcrypto.ASN1_TIME_print(bio.bio, asn1date)
pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
return pydate.replace(tzinfo=utc) | Converts openssl ASN1_TIME object to python datetime.datetime |
5,886 | def start(main_gui_class, **kwargs):
debug = kwargs.pop('debug', False)
standalone = kwargs.pop('standalone', False)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
format='%(name)-16s %(levelname)-8s %(message)s')  # NOTE: format string assumed; original literal lost
logging.getLogger().setLevel(
level=logging.DEBUG if debug else logging.INFO)
if standalone:
s = StandaloneServer(main_gui_class, start=True, **kwargs)
else:
s = Server(main_gui_class, start=True, **kwargs) | This method starts the webserver with a specific App subclass. |
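A hedged usage sketch (requires the remi package; the App subclass is a minimal made-up example, and the blocking start() call is left commented out):
import remi.gui as gui
from remi import App, start

class HelloApp(App):
    def main(self):
        # the root widget returned here becomes the page content
        return gui.Label("Hello")

# start(HelloApp, debug=True, standalone=False)   # launches the web server (blocks)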
5,887 | def _time_to_string(self, dt, conversion_string="%Y %m %d %H %M"):
if self.output_timezone is not None:
dt = dt.replace(tzinfo=utc) \
.astimezone(self.output_timezone)
return dt.strftime(conversion_string) | This converts a UTC time integer to a string |
5,888 | def make(self):
try:
self.mkfile(self.lock_file)
except Exception as e:
self.die('Failed to create lock file: {0}'.format(str(e)))  # NOTE: message text assumed; original literal lost
5,889 | def write_quotes(self, quotes):
if self.first:
Base.metadata.create_all(self.engine, checkfirst=True)
self.first=False
session=self.getWriteSession()
session.add_all([self.__quoteToSql(quote) for quote in quotes]) | write quotes |
5,890 | def _createIndexesFor(self, tableClass, extantIndexes):
try:
indexes = _requiredTableIndexes[tableClass]
except KeyError:
indexes = set()
for nam, atr in tableClass.getSchema():
if atr.indexed:
indexes.add(((atr.getShortColumnName(self),), (atr.attrname,)))
for compound in atr.compoundIndexes:
indexes.add((tuple(inatr.getShortColumnName(self) for inatr in compound),
tuple(inatr.attrname for inatr in compound)))
_requiredTableIndexes[tableClass] = indexes
# NOTE: the loop building each CREATE INDEX statement was lost in extraction;
# this is an approximate reconstruction using the collected (columns, attrs) pairs.
for (indexColumns, indexAttrs) in indexes:
indexName = self._indexNameOf(tableClass, indexAttrs)
if indexName in extantIndexes:
continue
csql = 'CREATE INDEX %s ON %s(%s)' % (
indexName, tableClass.getTableName(self), ', '.join(indexColumns))
self.createSQL(csql)
defined by C{tableClass}.
@param tableClass: A L{MetaItem} instance which may define a schema
which includes indexes.
@param extantIndexes: A container (anything which can be the right-hand
argument to the C{in} operator) which contains the unqualified
names of all indexes which already exist in the underlying database
and do not need to be created. |
5,891 | def accept(kind, doc=None, error_text=None, exception_handlers=empty.dict, accept_context=False):
return create(
doc,
error_text,
exception_handlers=exception_handlers,
chain=False,
accept_context=accept_context
)(kind) | Allows quick wrapping of any Python type cast function for use as a hug type annotation |
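A hedged usage sketch based on the signature above (requires the hug package; the route and parameter names are made up):
import hug

age_type = hug.types.accept(int, doc="Age in years", error_text="Invalid age")

@hug.get("/greet")
def greet(age: age_type):
    # hug casts the query parameter through int() and reports error_text on failure
    return {"age": age}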
5,892 | def load(fh, single=False, version=_default_version,
strict=False, errors=):
if isinstance(fh, stringtypes):
s = open(fh, 'r').read()
else:
s = fh.read()
return loads(s, single=single, version=version,
strict=strict, errors=errors) | Deserialize SimpleMRSs from a file (handle or filename)
Args:
fh (str, file): input filename or file object
single: if `True`, only return the first read Xmrs object
strict: deprecated; a `True` value is the same as
`errors='strict'`, and a `False` value is the same as
`errors='warn'`
errors: if `'strict'`, ill-formed MRSs raise an error; if
`'warn'`, raise a warning instead; if `'ignore'`, do not
warn or raise errors for ill-formed MRSs
Returns:
a generator of Xmrs objects (unless the *single* option is
`True`) |
5,893 | def get_blob(
self, blob_name, client=None, encryption_key=None, generation=None, **kwargs
):
blob = Blob(
bucket=self,
name=blob_name,
encryption_key=encryption_key,
generation=generation,
**kwargs
)
try:
blob.reload(client=client)
except NotFound:
return None
else:
return blob | Get a blob object by name.
This will return None if the blob doesn't exist:
.. literalinclude:: snippets.py
:start-after: [START get_blob]
:end-before: [END get_blob]
If :attr:`user_project` is set, bills the API request to that project.
:type blob_name: str
:param blob_name: The name of the blob to retrieve.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
See
https://cloud.google.com/storage/docs/encryption#customer-supplied.
:type generation: long
:param generation: Optional. If present, selects a specific revision of
this object.
:type kwargs: dict
:param kwargs: Keyword arguments to pass to the
:class:`~google.cloud.storage.blob.Blob` constructor.
:rtype: :class:`google.cloud.storage.blob.Blob` or None
:returns: The blob object if it exists, otherwise None. |
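A hedged usage sketch (requires google-cloud-storage and valid credentials; the bucket and object names are placeholders):
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-example-bucket")
blob = bucket.get_blob("path/to/object.txt")   # None if the object does not exist
if blob is None:
    print("object does not exist")
else:
    print(blob.size)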
5,894 | def envs(self):
ret = []
for saltenv in self.opts['file_roots']:  # NOTE: key name assumed; original literal lost
ret.append(saltenv)
return ret | Return the available environments |
5,895 | def get_word_index(tokens, char_index):
for (i, token) in enumerate(tokens):
if token['char_end'] == 0:  # NOTE: key names assumed; original literals lost
continue
if token['char_begin'] <= char_index and char_index <= token['char_end']:
return i
return 0 | Given word return word index. |
5,896 | def _encode_attribute(self, name, type_):
for char in ' %{},':
if char in name:
name = '"%s"' % name
break
if isinstance(type_, (tuple, list)):
type_tmp = [u'%s' % encode_string(type_k) for type_k in type_]
type_ = u'{%s}' % (u', '.join(type_tmp))
return u'%s %s %s' % (_TK_ATTRIBUTE, name, type_) | (INTERNAL) Encodes an attribute line.
The attribute follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
This method must receive a the name of the attribute and its type, if
the attribute type is nominal, ``type`` must be a list of values.
:param name: a string.
:param type_: a string or a list of string.
:return: a string with the encoded attribute declaration. |
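A hedged end-to-end sketch through liac-arff's public dump API (requires the liac-arff package), which runs this encoder for each attribute; the dataset contents are made up:
import arff

dataset = {
    "relation": "weather",
    "attributes": [("temperature", "NUMERIC"),
                   ("play tennis", ["yes", "no"])],   # nominal attribute with a space in its name
    "data": [[21.5, "yes"], [10.0, "no"]],
}
print(arff.dumps(dataset))
# the nominal attribute is emitted quoted, e.g.: @ATTRIBUTE "play tennis" {yes, no}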
5,897 | def lemmatize(self, text, best_guess=True, return_frequencies=False):
if isinstance(text, str):
tokens = wordpunct_tokenize(text)
elif isinstance(text, list):
tokens= text
else:
raise TypeError("lemmatize only works with strings or lists of string tokens.")
return [self._lemmatize_token(token, best_guess, return_frequencies) for token in tokens] | Lemmatize all tokens in a string or a list. A string is first tokenized using punkt.
Throw a type error if the input is neither a string nor a list. |
5,898 | def get_version():
# NOTE: the file path and parts of the regex below are reconstructed; the original literals were garbled in extraction
VERSION_FILE = 'version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE)) | Extracts the version number from the version.py file.
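A self-contained illustration of the same __version__-regex extraction; the file contents here are made up rather than read from disk:
import re

version_py = '__version__ = "1.4.2"\n'
mo = re.search(r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', version_py, re.M)
print(mo.group(1))   # -> 1.4.2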
5,899 | def plot_points(points, show=True):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
points = np.asanyarray(points, dtype=np.float64)
if len(points.shape) != 2:
raise ValueError('Points must be (n, 2) or (n, 3)!')
if points.shape[1] == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*points.T)
elif points.shape[1] == 2:
plt.scatter(*points.T)
else:
raise ValueError('Points are not 2D or 3D: {}'.format(
points.shape))
if show:
plt.show() | Plot an (n,3) list of points using matplotlib
Parameters
-------------
points : (n, 3) float
Points in space
show : bool
If False, will not show until plt.show() is called |
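For reference, the 3D branch of this helper boils down to the following self-contained matplotlib call (requires numpy and matplotlib; the point cloud is random):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)

pts = np.random.random((200, 3))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*pts.T)
plt.show()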