Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
4,100 | def function(fname):
def _f(func):
class WrapFunction(Function):
name = fname
def __call__(self, *args, **kwargs):
return func(*args, **kwargs)
return WrapFunction
return _f | Wrap a plain function into a Function subclass |
4,101 | def _requirement_element(self, parent_element, req_data):
req_data = self._transform_result(req_data)
if not req_data:
return
title = req_data.get("title")
if not title:
logger.warning("Skipping requirement, title is missing")
return
req_id = req_data.get("id")
if not self._check_lookup_prop(req_id):
logger.warning(
"Skipping requirement `%s`, data missing for selected lookup method", title
)
return
attrs, custom_fields = self._classify_data(req_data)
attrs, custom_fields = self._fill_defaults(attrs, custom_fields)
attrs = OrderedDict(sorted(attrs.items()))
custom_fields = OrderedDict(sorted(custom_fields.items()))
requirement = etree.SubElement(parent_element, "requirement", attrs)
title_el = etree.SubElement(requirement, "title")
title_el.text = title
description = req_data.get("description")
if description:
description_el = etree.SubElement(requirement, "description")
description_el.text = description
self._fill_custom_fields(requirement, custom_fields) | Adds requirement XML element. |
4,102 | def sort_direction(self):
if self.table._meta.order_by == self.name:
return "asc"
elif self.table._meta.order_by == ("-" + self.name):
return "desc"
else:
return None | Return the direction in which the linked table is sorted by
this column ("asc" or "desc"), or None if this column is unsorted. |
4,103 | def safe_compare_digest(val1, val2):
if len(val1) != len(val2):
return False
result = 0
if PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for i, j in zip(val1, val2):
result |= i ^ j
else:
for i, j in zip(val1, val2):
result |= (ord(i) ^ ord(j))
return result == 0 | safe_compare_digest method.
:param val1: string or bytes to compare
:type val1: str | bytes
:param val2: string or bytes to compare
:type val2: str | bytes |
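A minimal usage sketch of the row above (token values are made up); with str inputs the ord() branch is used:
expected = "s3cr3t-token"
supplied = "s3cr3t-tokem"
if safe_compare_digest(expected, supplied):
    print("tokens match")
else:
    print("tokens differ")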
4,104 | def sort_schemas(schemas):
def keyfun(v):
x = SQL_SCHEMA_REGEXP.match(v).groups()
return (int(x[0]), x[1], int(x[2]) if x[2] else None,
x[3] if x[3] else , int(x[4]))
return sorted(schemas, key=keyfun) | Sort a list of SQL schemas in order |
4,105 | def check_model(self, max_paths=1, max_path_length=5):
results = []
for stmt in self.statements:
result = self.check_statement(stmt, max_paths, max_path_length)
results.append((stmt, result))
return results | Check all the statements added to the ModelChecker.
Parameters
----------
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
Returns
-------
list of (Statement, PathResult)
Each tuple contains the Statement checked against the model and
a PathResult object describing the results of model checking. |
4,106 | def update_serviceprofile(self, host_id, vlan_id):
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info(
, str(host_id))
return False
service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
if service_profile:
LOG.debug(,
service_profile)
else:
LOG.info(
, host_id)
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error(
, str(vlan_id))
return False
if not self._update_service_profile(handle,
service_profile,
vlan_id,
ucsm_ip):
LOG.error(
,
{: service_profile, : ucsm_ip})
return False
return True | Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programed on a server's
ethernet ports and the Fabric Interconnect's network ports. |
4,107 | def _init_map(self):
QuestionFilesFormRecord._init_map(self)
FirstAngleProjectionFormRecord._init_map(self)
super(MultiChoiceOrthoQuestionFormRecord, self)._init_map() | stub |
4,108 | def _loadf(ins):
output = _float_oper(ins.quad[2])
output.extend(_fpush())
return output | Loads a floating point value from a memory address.
If the 2nd arg. starts with '*', it is always treated as
an indirect value. |
4,109 | def values_update(self, range, params=None, body=None):
url = SPREADSHEET_VALUES_URL % (self.id, quote(range))
r = self.client.request(, url, params=params, json=body)
return r.json() | Lower-level method that directly calls `spreadsheets.values.update <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update>`_.
:param str range: The `A1 notation <https://developers.google.com/sheets/api/guides/concepts#a1_notation>`_ of the values to update.
:param dict params: (optional) `Query parameters <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#query-parameters>`_.
:param dict body: (optional) `Request body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#request-body>`_.
:returns: `Response body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#response-body>`_.
:rtype: dict
Example::
sh.values_update(
'Sheet1!A2',
params={
'valueInputOption': 'USER_ENTERED'
},
body={
'values': [[1, 2, 3]]
}
)
.. versionadded:: 3.0 |
4,110 | def _build_file_writer(cls, session: AppSession):
args = session.args
if args.delete_after:
return session.factory.new()
elif args.output_document:
session.factory.class_map[] = SingleDocumentWriter
return session.factory.new(, args.output_document,
headers_included=args.save_headers)
use_dir = (len(args.urls) != 1 or args.page_requisites
or args.recursive)
if args.use_directories == :
use_dir = True
elif args.use_directories == :
use_dir = False
os_type = if in args.restrict_file_names \
else
ascii_only = in args.restrict_file_names
no_control = not in args.restrict_file_names
if in args.restrict_file_names:
case =
elif in args.restrict_file_names:
case =
else:
case = None
path_namer = session.factory.new(
,
args.directory_prefix,
index=args.default_page,
use_dir=use_dir,
cut=args.cut_dirs,
protocol=args.protocol_directories,
hostname=args.host_directories,
os_type=os_type,
ascii_only=ascii_only,
no_control=no_control,
case=case,
max_filename_length=args.max_filename_length,
)
if args.recursive or args.page_requisites or args.continue_download:
if args.clobber_method == :
file_class = OverwriteFileWriter
else:
file_class = IgnoreFileWriter
elif args.timestamping:
file_class = TimestampingFileWriter
else:
file_class = AntiClobberFileWriter
session.factory.class_map[] = file_class
return session.factory.new(
,
path_namer,
file_continuing=args.continue_download,
headers_included=args.save_headers,
local_timestamping=args.use_server_timestamps,
adjust_extension=args.adjust_extension,
content_disposition=args.content_disposition,
trust_server_names=args.trust_server_names,
) | Create the File Writer.
Returns:
FileWriter: An instance of :class:`.writer.BaseFileWriter`. |
4,111 | def _control(self, state):
if not self._subscription_is_recent():
self._subscribe()
cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
_LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
ack_state = self._udp_transact(cmd, self._control_resp, state)
if ack_state is None:
raise S20Exception(
"Device didn't acknowledge control request: {}".format(
self.host)) | Control device state.
Possible states are ON or OFF.
:param state: Switch to this state. |
4,112 | def assert_valid_rule_class(clazz):
if not (issubclass(clazz, rules.LineRule) or issubclass(clazz, rules.CommitRule)):
msg = u"User-defined rule class must extend from {1}.{2} or {1}.{3}"
raise UserRuleError(msg.format(clazz.__name__, rules.CommitRule.__module__,
rules.LineRule.__name__, rules.CommitRule.__name__))
if not hasattr(clazz, 'id') or clazz.id is None or not clazz.id:
raise UserRuleError(u"User-defined rule class '{0}' must have an 'id' attribute".format(clazz.__name__))
if not hasattr(clazz, 'validate') or not inspect.isroutine(clazz.validate):
raise UserRuleError(u"User-defined rule class '{0}' must have a 'validate' method".format(clazz.__name__))
if issubclass(clazz, rules.LineRule):
if clazz.target not in [rules.CommitMessageTitle, rules.CommitMessageBody]:
msg = u"The target attribute of the user-defined LineRule class must be either {1}.{2} or {1}.{3}"
msg = msg.format(clazz.__name__, rules.CommitMessageTitle.__module__,
rules.CommitMessageTitle.__name__, rules.CommitMessageBody.__name__)
raise UserRuleError(msg) | Asserts that a given rule clazz is valid by checking a number of its properties:
- Rules must extend from LineRule or CommitRule
- Rule classes must have id and name string attributes.
The options_spec is optional, but if set, it must be a list of gitlint Options.
- Rule classes must have a validate method. In case of a CommitRule, validate must take a single commit parameter.
In case of LineRule, validate must take line and commit as first and second parameters.
- LineRule classes must have a target class attribute that is set to either
CommitMessageTitle or CommitMessageBody.
- User Rule id's cannot start with R, T, B or M as these rule ids are reserved for gitlint itself. |
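A hedged illustration of a user-defined rule that would satisfy the checks above (class name, id and message are made up; assumes gitlint's rules module):
from gitlint import rules

class TitleNoTrailingDot(rules.LineRule):
    """Hypothetical rule: commit title must not end with a period."""
    name = "title-no-trailing-dot"
    id = "UC1"  # must not start with R, T, B or M
    target = rules.CommitMessageTitle

    def validate(self, line, commit):
        if line.endswith("."):
            return [rules.RuleViolation(self.id, "Title ends with a dot", line)]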
4,113 | def p_values(self, p):
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1] | values :
| values value VALUE_SEPARATOR
| values value |
4,114 | def generate_one(self):
weights = [self.probability_func(self.generated[element])
for element in self.domain]
element = random.choices(self.domain, weights=weights)[0]
self.generated[element] += 1
return element | Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True |
4,115 | def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath):
git_file = osp.join(working_tree_dir, )
rela_path = osp.relpath(module_abspath, start=working_tree_dir)
if is_win:
if osp.isfile(git_file):
os.remove(git_file)
with open(git_file, ) as fp:
fp.write(("gitdir: %s" % rela_path).encode(defenc))
with GitConfigParser(osp.join(module_abspath, ),
read_only=False, merge_includes=False) as writer:
writer.set_value(, ,
to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath))) | Writes a .git file containing a (preferably) relative path to the actual git module repository.
It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir
:note: will overwrite existing files !
:note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration
and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed
if it becomes one
:param working_tree_dir: directory to write the .git file into
:param module_abspath: absolute path to the bare repository |
4,116 | def uninstall(self):
if self.installed:
sys.meta_path.remove(self)
import_list = []
for name in self.__loaded_modules:
del sys.modules[name]
import_list.append(name)
for name in import_list:
__import__(name)
self.__reset() | Uninstall the module finder. If not installed, this will do nothing.
After uninstallation, none of the newly loaded modules will be
decorated (that is, everything will be back to normal). |
4,117 | def start(self):
if self.mode == "manual":
return
if self.ipython_dir != :
self.ipython_dir = os.path.abspath(os.path.expanduser(self.ipython_dir))
if self.log:
stdout = open(os.path.join(self.ipython_dir, "{0}.controller.out".format(self.profile)), )
stderr = open(os.path.join(self.ipython_dir, "{0}.controller.err".format(self.profile)), )
else:
stdout = open(os.devnull, )
stderr = open(os.devnull, )
try:
opts = [
,
if self.ipython_dir == else .format(self.ipython_dir),
self.interfaces if self.interfaces is not None else ,
if self.profile == else .format(self.profile),
if self.reuse else ,
.format(self.public_ip) if self.public_ip else ,
.format(self.port) if self.port is not None else
]
if self.port_range is not None:
opts += [
.format(self.hb_ping, self.hb_pong),
.format(self.control_client, self.control_engine),
.format(self.mux_client, self.mux_engine),
.format(self.task_client, self.task_engine)
]
logger.debug("Starting ipcontroller with ".format(.join([str(x) for x in opts])))
self.proc = subprocess.Popen(opts, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
except FileNotFoundError:
msg = "Could not find ipcontroller. Please make sure that ipyparallel is installed and available in your env"
logger.error(msg)
raise ControllerError(msg)
except Exception as e:
msg = "IPPController failed to start: {0}".format(e)
logger.error(msg)
raise ControllerError(msg) | Start the controller. |
4,118 | def host_info_getter(func, name=None):
name = name or func.__name__
host_info_gatherers[name] = func
return func | The decorated function is added to the process of collecting the host_info.
This just adds the decorated function to the global
``sacred.host_info.host_info_gatherers`` dictionary.
The functions from that dictionary are used when collecting the host info
using :py:func:`~sacred.host_info.get_host_info`.
Parameters
----------
func : callable
A function that can be called without arguments and returns some
json-serializable information.
name : str, optional
The name of the corresponding entry in host_info.
Defaults to the name of the function.
Returns
-------
The function itself. |
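For example, registering an extra host-info entry with this decorator could look like the sketch below (the entry name and value are illustrative):
import multiprocessing

@host_info_getter
def cpu_count():
    # Registered in host_info_gatherers under the key 'cpu_count'.
    return multiprocessing.cpu_count()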
4,119 | def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
)
filter_collection = getattr(
configuration_object, , None)
if not filter_collection:
raise errors.BadConfigObject(
)
date_filters = getattr(options, , None)
if not date_filters:
return
file_entry_filter = file_entry_filters.DateTimeFileEntryFilter()
for date_filter in date_filters:
date_filter_pieces = date_filter.split()
if len(date_filter_pieces) != 3:
raise errors.BadConfigOption(
.format(date_filter))
time_value, start_time_string, end_time_string = date_filter_pieces
time_value = time_value.strip()
start_time_string = start_time_string.strip()
end_time_string = end_time_string.strip()
try:
file_entry_filter.AddDateTimeRange(
time_value, start_time_string=start_time_string,
end_time_string=end_time_string)
except ValueError:
raise errors.BadConfigOption(
.format(date_filter))
filter_collection.AddFilter(file_entry_filter) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. |
4,120 | def parse_timezone(matches, default_timezone=UTC):
if matches["timezone"] == "Z":
return UTC
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description) | Parses ISO 8601 time zone specs into tzinfo offsets |
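A rough usage sketch, assuming `matches` holds the groups produced by the module's ISO 8601 regex (string values, as the function expects):
matches = {"timezone": "+05:30", "tz_sign": "+", "tz_hour": "05", "tz_minute": "30"}
tz = parse_timezone(matches)  # FixedOffset(5, 30, '+05:30')
assert parse_timezone({"timezone": "Z"}) is UTC  # 'Z' maps straight to UTC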
4,121 | def description(self, request, id, description):
request.data = json.dumps({
"description": description
})
return self.send(request, id).json()[] | Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description |
4,122 | def size(self, filename: str) -> int:
yield from self._control_stream.write_command(Command('SIZE', filename))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(, ReplyCodes.file_status, reply)
try:
return int(reply.text.strip())
except ValueError:
return | Get size of file.
Coroutine. |
4,123 | def create(self):
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | Calls various methods sequentially in order to fully build the
database. |
4,124 | def user_parse(data):
yield , data.get()
yield , data.get()
yield , data.get()
yield , "https://cdn.discordapp.com/avatars/{}/{}.png".format(
data.get(), data.get()) | Parse information from the provider. |
4,125 | def lemke_howson(g, init_pivot=0, max_iter=10**6, capping=None,
full_output=False):
try:
N = g.N
except:
raise TypeError()
if N != 2:
raise NotImplementedError()
payoff_matrices = g.payoff_arrays
nums_actions = g.nums_actions
total_num = sum(nums_actions)
msg = + \
.format(total_num)
if not isinstance(init_pivot, numbers.Integral):
raise TypeError(msg)
if not (0 <= init_pivot < total_num):
raise ValueError(msg)
if capping is None:
capping = max_iter
tableaux = tuple(
np.empty((nums_actions[1-i], total_num+1)) for i in range(N)
)
bases = tuple(np.empty(nums_actions[1-i], dtype=int) for i in range(N))
converged, num_iter, init_pivot_used = \
_lemke_howson_capping(payoff_matrices, tableaux, bases, init_pivot,
max_iter, capping)
NE = _get_mixed_actions(tableaux, bases)
if not full_output:
return NE
res = NashResult(NE=NE,
converged=converged,
num_iter=num_iter,
max_iter=max_iter,
init=init_pivot_used)
return NE, res | Find one mixed-action Nash equilibrium of a 2-player normal form
game by the Lemke-Howson algorithm [2]_, implemented with
"complementary pivoting" (see, e.g., von Stengel [3]_ for details).
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
init_pivot : scalar(int), optional(default=0)
Initial pivot, an integer k such that 0 <= k < m+n, where
integers 0, ..., m-1 and m, ..., m+n-1 correspond to the actions
of players 0 and 1, respectively.
max_iter : scalar(int), optional(default=10**6)
Maximum number of pivoting steps.
capping : scalar(int), optional(default=None)
If supplied, the routine is executed with the heuristics
proposed by Codenotti et al. [1]_; see Notes below for details.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following game from von Stengel [3]_:
>>> np.set_printoptions(precision=4) # Reduce the digits printed
>>> bimatrix = [[(3, 3), (3, 2)],
... [(2, 2), (5, 6)],
... [(0, 3), (6, 1)]]
>>> g = NormalFormGame(bimatrix)
Obtain a Nash equilibrium of this game by `lemke_howson` with player
0's action 1 (out of the three actions 0, 1, and 2) as the initial
pivot:
>>> lemke_howson(g, init_pivot=1)
(array([ 0. , 0.3333, 0.6667]), array([ 0.3333, 0.6667]))
>>> g.is_nash(_)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = lemke_howson(g, init_pivot=1, full_output=True)
>>> res.converged # Whether the routine has converged
True
>>> res.num_iter # Number of pivoting steps performed
4
Notes
-----
* This routine is implemented with floating point arithmetic and
thus is subject to numerical instability.
* If `capping` is set to a positive integer, the routine is executed
with the heuristics proposed by [1]_:
* For k = `init_pivot`, `init_pivot` + 1, ..., `init_pivot` +
(m+n-2), (modulo m+n), the Lemke-Howson algorithm is executed
with k as the initial pivot and `capping` as the maximum number
of pivoting steps. If the algorithm converges during this loop,
then the Nash equilibrium found is returned.
* Otherwise, the Lemke-Howson algorithm is executed with
`init_pivot` + (m+n-1) (modulo m+n) as the initial pivot, with a
limit `max_iter` on the total number of pivoting steps.
According to the simulation results for *uniformly random games*,
for medium- to large-size games this heuristics outperforms the
basic Lemke-Howson algorithm with a fixed initial pivot, where
[1]_ suggests that `capping` be set to 10.
References
----------
.. [1] B. Codenotti, S. De Rossi, and M. Pagan, "An Experimental
Analysis of Lemke-Howson Algorithm," arXiv:0811.3247, 2008.
.. [2] C. E. Lemke and J. T. Howson, "Equilibrium Points of Bimatrix
Games," Journal of the Society for Industrial and Applied
Mathematics (1964), 413-423.
.. [3] B. von Stengel, "Equilibrium Computation for Two-Player Games
in Strategic and Extensive Form," Chapter 3, N. Nisan, T.
Roughgarden, E. Tardos, and V. Vazirani eds., Algorithmic Game
Theory, 2007. |
4,126 | def jsonify(resource):
response = flask.jsonify(resource.to_dict())
response = add_link_headers(response, resource.links())
return response | Return a Flask ``Response`` object containing a
JSON representation of *resource*.
:param resource: The resource to act as the basis of the response |
4,127 | def object_exists_in_project(obj_id, proj_id):
if obj_id is None:
raise ValueError("Expected obj_id to be a string")
if proj_id is None:
raise ValueError("Expected proj_id to be a string")
if not is_container_id(proj_id):
raise ValueError( % (proj_id,))
return try_call(dxpy.DXHTTPRequest, + obj_id + , {: proj_id})[] == proj_id | :param obj_id: object ID
:type obj_id: str
:param proj_id: project ID
:type proj_id: str
Returns True if the specified data object can be found in the specified
project. |
4,128 | def score(self, X, y=None, **kwargs):
y_pred = self.predict(X)
scores = precision_recall_fscore_support(y, y_pred)
self.support_score_ = scores[-1]
scores = list(scores)
scores[-1] = scores[-1] / scores[-1].sum()
scores = map(lambda s: dict(zip(self.classes_, s)), scores)
self.scores_ = dict(zip(SCORES_KEYS, scores))
if not self.support:
self.scores_.pop()
self.draw()
self.score_ = self.estimator.score(X, y)
return self.score_ | Generates the Scikit-Learn classification report.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy score |
4,129 | def run_with_reloader(main_func, extra_files=None, interval=1):
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get() == :
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass | Run the given function in an independent python interpreter. |
4,130 | def get_data_source(self):
product_type = self.product_id.split()[1]
if product_type.endswith() or product_type == :
return DataSource.SENTINEL2_L1C
if product_type.endswith() or product_type == :
return DataSource.SENTINEL2_L2A
raise ValueError(.format(self.product_id)) | The method determines data source from product ID.
:return: Data source of the product
:rtype: DataSource
:raises: ValueError |
4,131 | def record_run(record_type, print_session_id, **kwds):
if print_session_id and record_type != :
raise RuntimeError(
)
cfstore = ConfigStore()
json.dump(data, fp) | Record shell history. |
4,132 | def vm_ip(cls, vm_id):
vm_info = cls.info(vm_id)
for iface in vm_info[]:
if iface[] == :
continue
for ip in iface[]:
return ip[], ip[] | Return the first usable ip address for this vm.
Returns a (version, ip) tuple. |
4,133 | def save_pointings(self):
import tkFileDialog
f=tkFileDialog.asksaveasfile()
i=0
if self.pointing_format.get()==:
f.write()
if self.pointing_format.get()==:
f.write("index\n")
for pointing in self.pointings:
i=i+1
name=pointing["label"]["text"]
(sra,sdec)=str(pointing["camera"]).split()
ra=sra.split(":")
dec=sdec.split(":")
dec[0]=str(int(dec[0]))
if int(dec[0])>=0:
dec[0]=+dec[0]
if self.pointing_format.get()==:
f.write( "%5d %16s %2s %2s %4s %3s %2s %4s 2000\n" % (i, name,
ra[0].zfill(2),
ra[1].zfill(2),
ra[2].zfill(2),
dec[0].zfill(3),
dec[1].zfill(2),
dec[2].zfill(2)))
elif self.pointing_format.get()==:
f.write("%-20s|%11s|%11s|%6.1f|%-5d|\n" % (name,sra,sdec,2000.0,1))
elif self.pointing_format.get()==:
str1 = sra.replace(":"," ")
str2 = sdec.replace(":"," ")
f.write("%16s %16s %16s 2000\n" % ( name, str1, str2) )
elif self.pointing_format.get()==:
ra = []
dec= []
for ccd in pointing["camera"].getGeometry():
ra.append(ccd[0])
ra.append(ccd[2])
dec.append(ccd[1])
dec.append(ccd[3])
import math
dra=math.degrees(math.fabs(max(ra)-min(ra)))
ddec=math.degrees(math.fabs(max(dec)-min(dec)))
f.write("%f %f %16s %16s DATE 1.00 1.00 500 FILE\n" % (dra, ddec, sra, sdec ) )
if self.pointing_format.get()==:
f.write()
f.close() | Print the currently defined FOVs |
4,134 | def autodiscover():
from django.conf import settings
for application in settings.INSTALLED_APPS:
module = import_module(application)
if module_has_submodule(module, ):
emails = import_module( % application)
try:
import_module( % application)
except ImportError:
if module_has_submodule(emails, ):
raise | Imports all available preview classes. |
4,135 | def minimum_needs_section_header_element(feature, parent):
_ = feature, parent
header = minimum_needs_section_header[]
return header.capitalize() | Retrieve minimum needs section header string from definitions. |
4,136 | def is_descendant_of_vault(self, id_, vault_id):
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id) | Tests if an ``Id`` is a descendant of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``. |
4,137 | def get_docker_network(self, container_id, all_stats):
network_new = {}
try:
netcounters = all_stats["networks"]
except KeyError as e:
logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
return network_new
if not hasattr(self, 'netcounters_old'):
self.netcounters_old = {}
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
if container_id not in self.netcounters_old:
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
else:
try:
network_new[] = getTimeSinceLastUpdate(.format(container_id))
network_new[] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
network_new[] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
network_new[] = netcounters["eth0"]["rx_bytes"]
network_new[] = netcounters["eth0"]["tx_bytes"]
except KeyError as e:
logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
self.netcounters_old[container_id] = netcounters
return network_new | Return the container network usage using the Docker API (v1.0 or higher).
Input: id is the full container id
Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
with:
time_since_update: number of seconds elapsed between the latest grab
rx: Number of bytes received
tx: Number of bytes transmitted |
4,138 | def location(self):
try:
return self.data.get().get()
except (KeyError, AttributeError):
return self.device_status_simple() | Return the location of the printer. |
4,139 | def _run_code(code, run_globals, init_globals=None,
mod_name=None, mod_fname=None,
mod_loader=None, pkg_name=None):
if init_globals is not None:
run_globals.update(init_globals)
run_globals.update(__name__ = mod_name,
__file__ = mod_fname,
__loader__ = mod_loader,
__package__ = pkg_name)
exec code in run_globals
return run_globals | Helper to run code in nominated namespace |
4,140 | def open():
global _MATLAB_RELEASE
if is_win:
ret = MatlabConnection()
ret.open()
return ret
else:
if settings.MATLAB_PATH != :
matlab_path = settings.MATLAB_PATH +
elif _MATLAB_RELEASE != :
matlab_path = discover_location(_MATLAB_RELEASE)
else:
raise MatlabReleaseNotFound()
try:
ret = MatlabConnection(matlab_path)
ret.open()
except Exception:
raise MatlabReleaseNotFound( % matlab_path)
return ret | Opens MATLAB using the specified connection (or DCOM+ protocol on Windows) |
4,141 | def listar(self, id_divisao=None, id_ambiente_logico=None):
url =
if is_valid_int_param(id_divisao) and not is_valid_int_param(
id_ambiente_logico):
url = + str(id_divisao) +
elif is_valid_int_param(id_divisao) and is_valid_int_param(id_ambiente_logico):
url = + \
str(id_divisao) + + str(id_ambiente_logico) +
code, xml = self.submit(None, , url)
key =
return get_list_map(self.response(code, xml, [key]), key) | Lists the environments filtered according to the given parameters.
If both parameters are None, all environments are returned.
If id_divisao is not None, the environments are filtered by the
value of id_divisao.
If both id_divisao and id_ambiente_logico are not None, the environments
are filtered by id_divisao and id_ambiente_logico.
:param id_divisao: Identifier of the data center division.
:param id_ambiente_logico: Identifier of the logical environment.
:return: Dictionary with the following structure:
::
{'ambiente': [{'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >},
... remaining environments ... ]}
:raise DataBaseError: networkapi failure while accessing the database.
:raise XMLError: networkapi failure while generating the XML response. |
4,142 | def _get_relationships(model):
relationships = []
for name, relationship in inspect(model).relationships.items():
class_ = relationship.mapper.class_
if relationship.uselist:
rel = ListRelationship(name, relation=class_.__name__)
else:
rel = Relationship(name, relation=class_.__name__)
relationships.append(rel)
return tuple(relationships) | Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple |
4,143 | def configure_callbacks(app):
@app.before_request
def before_request():
from flask import session
session[] = helper.generate_menusystem()
print session[] | Configure application callbacks |
4,144 | def _read_input_csv(in_file):
with io.open(in_file, newline=None) as in_handle:
reader = csv.reader(in_handle)
next(reader)
for line in reader:
if line:
(fc_id, lane, sample_id, genome, barcode) = line[:5]
yield fc_id, lane, sample_id, genome, barcode | Parse useful details from SampleSheet CSV file. |
4,145 | def _evaluate(self,R,z,phi=0.,t=0.):
if self.alpha == 2.:
return nu.log(R**2.+z**2.)/2.
else:
return -(R**2.+z**2.)**(1.-self.alpha/2.)/(self.alpha-2.) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-10 - Started - Bovy (NYU) |
4,146 | def check(self, check_url=None):
if check_url is not None:
self.check_url = self._normalize_check_url(check_url)
response = None
sleeped = 0.0
t = datetime.now()
while not response:
try:
response = requests.get(self.check_url, verify=False)
except requests.exceptions.ConnectionError:
if sleeped > self.timeout:
self._kill()
raise LiveAndLetDieError(
t start in specified timeout {2} seconds!\ncommand: {3} '.join(self.create_command())
)
)
time.sleep(1)
sleeped = _get_total_seconds(datetime.now() - t)
return _get_total_seconds(datetime.now() - t) | Checks whether a server is running.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``. |
4,147 | def get_sof_term(self, C, rup):
if rup.rake <= -45.0 and rup.rake >= -135.0:
return C["FN_UM"]
elif rup.rake > 45.0 and rup.rake < 135.0:
return C["FRV_UM"]
else:
return 0.0 | In the case of upper mantle events, separate coefficients
are considered for normal, reverse and strike-slip faulting |
4,148 | def __grabHotkeys(self):
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
for item in c.globalHotkeys:
if item.enabled:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys) | Run during startup to grab global and specific hotkeys in all open windows |
4,149 | def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
model = queryset.model
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = .join([_quote(field) for field in all_field_names])
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = .join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = .join([
.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = + _get_return_fields_sql(returning, return_status=True) if returning else
ignore_duplicates_sql =
if ignore_duplicate_updates:
ignore_duplicates_sql = (
).format(
update_fields_sql=.join(
.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=.join(
+ _quote(field.column)
for field in update_fields
)
)
on_conflict = (
.format(update_fields_sql, ignore_duplicates_sql) if update_fields else
)
if return_untouched:
row_values_sql = .join([
{0}\.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
n\
n\
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias=)
)
else:
row_values_sql = .join(row_values)
sql = (
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args | Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2; |
4,150 | def dump(bqm, fp, vartype_header=False):
for triplet in _iter_triplets(bqm, vartype_header):
fp.write( % triplet) | Dump a binary quadratic model to a string in COOrdinate format. |
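A hedged usage sketch (the model and file name are illustrative; assumes the dimod package):
import dimod

bqm = dimod.BinaryQuadraticModel({0: -1.0}, {(0, 1): 0.5}, 0.0, dimod.SPIN)
with open("bqm.coo", "w") as fp:
    dump(bqm, fp, vartype_header=True)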
4,151 | def run_command(self, command, arg=None, is_eval=False, member_id=None):
logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals()))
mode = is_eval and 'eval' or 'command'
hostname = None
if isinstance(member_id, int):
hostname = self.member_id_to_host(member_id)
result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)
logger.debug("command result: {result}".format(result=result))
return result | run command on replica set
if member_id is specified, the command will be executed on this server
if member_id is not specified, the command will be executed on the primary
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
member_id - member id
return command's result |
4,152 | def genesis_block_audit(genesis_block_stages, key_bundle=GENESIS_BLOCK_SIGNING_KEYS):
gpg2_path = find_gpg2()
if gpg2_path is None:
raise Exception()
log.debug(.format(len(key_bundle)))
res = load_signing_keys(gpg2_path, [key_bundle[kid] for kid in key_bundle])
if not res:
raise Exception()
log.debug(.format(len(key_bundle)))
res = check_gpg2_keys(gpg2_path, key_bundle.keys())
if not res:
raise Exception()
d = tempfile.mkdtemp(prefix=)
for stage_id, stage in enumerate(genesis_block_stages):
log.debug(.format(stage_id))
try:
jsonschema.validate(GENESIS_BLOCK_SCHEMA, stage)
except jsonschema.ValidationError:
shutil.rmtree(d)
log.error()
raise ValueError()
for history_id, history_row in enumerate(stage[]):
with open(os.path.join(d, ), ) as f:
f.write(history_row[])
with open(os.path.join(d, ), ) as f:
f.write(history_row[])
p = subprocess.Popen([gpg2_path, , os.path.join(d,), os.path.join(d,)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
log.error(.format(stage_id, history_id))
shutil.rmtree(d)
return False
gb_rows_str = json.dumps(stage[], sort_keys=True, separators=(,)) +
gb_rows_hash = hashlib.sha256(gb_rows_str).hexdigest()
if gb_rows_hash != stage[][-1][]:
log.error(.format(stage_id, gb_rows_hash, stage[][-1][]))
shutil.rmtree(d)
return False
shutil.rmtree(d)
log.info()
return True | Verify the authenticity of the stages of the genesis block, optionally with a given set of keys.
Return True if valid
Return False if not |
4,153 | def _Rforce(self,R,z,phi=0.,t=0.):
return -R/(R**2.+z**2.)**(self.alpha/2.) | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-10 - Written - Bovy (NYU) |
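A quick consistency check with the _evaluate row above: differentiating the power-law potential gives, in both the alpha = 2 and alpha != 2 branches,
F_R = -dPhi/dR = -R * (R**2 + z**2)**(-alpha/2),
which is exactly the expression returned by _Rforce.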
4,154 | def create_row_to_some_id_col_mapping(id_array):
original_order_unique_ids = get_original_order_unique_ids(id_array)
rows_to_ids = (id_array[:, None] ==
original_order_unique_ids[None, :]).astype(int)
return rows_to_ids | Parameters
----------
id_array : 1D ndarray.
All elements of the array should be ints representing some id related
to the corresponding row.
Returns
-------
rows_to_ids : 2D scipy sparse array.
Will map each row of id_array to the unique values of `id_array`. The
columns of the returned sparse array will correspond to the unique
values of `id_array`, in the order of appearance for each of these
unique values. |
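A small worked example of the mapping above (values chosen for illustration; the helper keeps unique ids in order of first appearance):
import numpy as np

id_array = np.array([7, 7, 3, 7, 3])
rows_to_ids = create_row_to_some_id_col_mapping(id_array)
# Columns correspond to the unique ids [7, 3]:
# [[1, 0],
#  [1, 0],
#  [0, 1],
#  [1, 0],
#  [0, 1]]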
4,155 | def on_change_checkout(self):
checkout_date = time.strftime(dt)
checkin_date = time.strftime(dt)
if not (checkout_date and checkin_date):
return {: {}}
delta = timedelta(days=1)
dat_a = time.strptime(checkout_date, dt)[:5]
addDays = datetime(*dat_a) + delta
self.dummy = addDays.strftime(dt) | When you change checkout or checkin update dummy field
-----------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation |
4,156 | def create_server(self, *args, **kwargs):
if not in kwargs:
kwargs[] = False
s = super(HPNova, self).create_server(*args, **kwargs)
return fix_hp_addrs(s) | Wraps :meth:`bang.providers.openstack.Nova.create_server` to apply
hpcloud specialization, namely pulling IP addresses from the hpcloud's
non-standard return values. |
4,157 | def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
state_names = []
data = S.var()
weight = S.var("encoder_weight", stype=)
embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size,
output_dim=num_embed, name=, sparse_grad=True)
states = []
outputs = S.Dropout(embed, p=dropout)
for i in range(num_layers):
prefix = % i
init_h = S.var(prefix + , shape=(batch_size, num_proj), init=mx.init.Zero())
init_c = S.var(prefix + , shape=(batch_size, nhid), init=mx.init.Zero())
state_names += [prefix + , prefix + ]
lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix)
outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \
layout=, merge_outputs=True)
outputs = S.Dropout(outputs, p=dropout)
states += [S.stop_gradient(s) for s in next_states]
outputs = S.reshape(outputs, shape=(-1, num_proj))
trainable_lstm_args = []
for arg in outputs.list_arguments():
if in arg and not in arg:
trainable_lstm_args.append(arg)
return outputs, states, trainable_lstm_args, state_names | word embedding + LSTM Projected |
4,158 | def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):
assert x.shape.ndims == 1 + num_nonbatch_dims
new_shape = (
prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:])
assert None not in new_shape
if new_shape != x.shape.as_list():
x = tf.reshape(x, new_shape)
return x | Reverse op of _tf_flatten_batch_dims.
Un-flatten the first dimension of x to match all but the last
num_nonbatch_dims dimensions of prototype.
Args:
x: a tf.Tensor with 1 + num_nonbatch_dims dimensions
num_nonbatch_dims: an integer
prototype: a tf.Tensor
Returns:
a tf.Tensor |
4,159 | def stop_subscribe(self):
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
self.event_loop.stop()
self.event_loop.close() | This function is used to stop the event loop created when subscribe is called. But this function doesn't
stop the thread and should be avoided until it's completely developed. |
4,160 | def add_resource(
self,
base_rule,
base_view,
alternate_view=None,
alternate_rule=None,
id_rule=None,
app=None,
):
if alternate_view:
if not alternate_rule:
id_rule = id_rule or DEFAULT_ID_RULE
alternate_rule = posixpath.join(base_rule, id_rule)
else:
assert id_rule is None
else:
assert alternate_rule is None
assert id_rule is None
app = self._get_app(app)
endpoint = self._get_endpoint(base_view, alternate_view)
alternate_view_func = alternate_view.as_view(endpoint)
@functools.wraps(base_view_func)
def view_func(*args, **kwargs):
if flask.request.url_rule.rule == base_rule_full:
return base_view_func(*args, **kwargs)
else:
return alternate_view_func(*args, **kwargs)
app.add_url_rule(
base_rule_full, view_func=view_func, endpoint=endpoint,
methods=base_view.methods,
)
app.add_url_rule(
alternate_rule_full, view_func=view_func, endpoint=endpoint,
methods=alternate_view.methods,
)
views[base_view] = Resource(base_view, base_rule_full)
views[alternate_view] = Resource(alternate_view, alternate_rule_full) | Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present. |
4,161 | def search_commits(self, query, sort=github.GithubObject.NotSet, order=github.GithubObject.NotSet, **qualifiers):
assert isinstance(query, (str, unicode)), query
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
assert sort in (, ), sort
url_parameters["sort"] = sort
if order is not github.GithubObject.NotSet:
assert order in (, ), order
url_parameters["order"] = order
query_chunks = []
if query:
query_chunks.append(query)
for qualifier, value in qualifiers.items():
query_chunks.append("%s:%s" % (qualifier, value))
url_parameters["q"] = .join(query_chunks)
assert url_parameters["q"], "need at least one qualifier"
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self.__requester,
"/search/commits",
url_parameters,
headers={
"Accept": Consts.mediaTypeCommitSearchPreview
}
) | :calls: `GET /search/commits <http://developer.github.com/v3/search>`_
:param query: string
:param sort: string ('author-date', 'committer-date')
:param order: string ('asc', 'desc')
:param qualifiers: keyword dict query qualifiers
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit` |
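For instance, assuming an authenticated PyGithub client `g`, the keyword qualifiers become search qualifiers (the repository name below is illustrative):
commits = g.search_commits("fix race condition", sort="author-date", order="desc",
                           repo="octocat/Hello-World")
for commit in commits[:5]:
    print(commit.sha)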
4,162 | def clonemedium(medium,
uuid_in=None,
file_in=None,
uuid_out=None,
file_out=None,
mformat=None,
variant=None,
existing=False,
**kwargs):
hypervisor
params =
valid_mediums = (, , )
if medium in valid_mediums:
params += medium
else:
raise CommandExecutionError(
.format(.join(valid_mediums))
)
if (uuid_in and file_in) or (not uuid_in and not file_in):
raise CommandExecutionError(
)
if uuid_in:
if medium == :
item =
elif medium == :
item =
elif medium == :
item =
items = list_items(item)
if uuid_in not in items:
raise CommandExecutionError(.format(uuid_in))
params += + uuid_in
elif file_in:
if not os.path.exists(file_in):
raise CommandExecutionError(.format(file_in))
params += + file_in
if (uuid_out and file_out) or (not uuid_out and not file_out):
raise CommandExecutionError(
)
if uuid_out:
params += + uuid_out
elif file_out:
try:
salt.utils.files.fopen(file_out, ).close()
os.unlink(file_out)
params += + file_out
except OSError:
raise CommandExecutionError(.format(file_out))
if mformat:
valid_mformat = (, , , )
if mformat not in valid_mformat:
raise CommandExecutionError(
.format(.join(valid_mformat))
)
else:
params += + mformat
valid_variant = (, , , , )
if variant and variant not in valid_variant:
if not os.path.exists(file_in):
raise CommandExecutionError(
.format(.join(valid_variant))
)
else:
params += + variant
if existing:
params +=
cmd = .format(vboxcmd(), params)
ret = salt.modules.cmdmod.run_all(cmd)
if ret[] == 0:
return True
return ret[] | Clone a new VM from an existing VM
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonemedium <name> <new_name> |
4,163 | def save(self, inplace=True):
modified_data = self._modified_data()
if bool(modified_data):
extra = {
: self.__class__.__name__,
: {
: self.id,
: modified_data
}
}
logger.info(, extra=extra)
data = self._api.patch(url=self._URL[].format(id=self.id),
data=modified_data).json()
marker = Marker(api=self._api, **data)
return marker
else:
raise ResourceNotModified() | Saves all modifications to the marker on the server.
:param inplace: Apply edits to the current instance or get a new one.
:return: Marker instance. |
4,164 | def _to_dict(self, node):
if node == BLANK_NODE:
return {}
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
nibbles = without_terminator(unpack_to_nibbles(node[0]))
key = b.join([to_string(x) for x in nibbles])
if node_type == NODE_TYPE_EXTENSION:
sub_dict = self._to_dict(self._decode_to_node(node[1]))
else:
sub_dict = {to_string(NIBBLE_TERMINATOR): node[1]}
res = {}
for sub_key, sub_value in sub_dict.items():
full_key = (key + b + sub_key).strip(b)
res[full_key] = sub_value
return res
elif node_type == NODE_TYPE_BRANCH:
res = {}
for i in range(16):
sub_dict = self._to_dict(self._decode_to_node(node[i]))
for sub_key, sub_value in sub_dict.items():
full_key = (
str_to_bytes(
str(i)) +
b +
sub_key).strip(b)
res[full_key] = sub_value
if node[16]:
res[to_string(NIBBLE_TERMINATOR)] = node[-1]
return res | convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node |
4,165 | def get_hkr_state(self):
self.update()
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual' | Get the thermostat state. |
4,166 | def drawDisplay( self, painter, option, rect, text ):
painter.setBrush(Qt.NoBrush)
painter.drawText(rect.left() + 3,
rect.top(),
rect.width() - 3,
rect.height(),
option.displayAlignment,
text) | Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str> |
4,167 | async def _notify_update(self, name, change_type, change_info=None, directed_client=None):
for monitor in self._monitors:
try:
result = monitor(name, change_type, change_info, directed_client=directed_client)
if inspect.isawaitable(result):
await result
except Exception:
self._logger.warning("Error calling monitor with update %s", name, exc_info=True) | Notify updates on a service to anyone who cares. |
4,168 | def gzip_dir(path, compresslevel=6):
for f in os.listdir(path):
full_f = os.path.join(path, f)
if not f.lower().endswith("gz"):
with open(full_f, ) as f_in, \
GzipFile(.format(full_f), ,
compresslevel=compresslevel) as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copystat(full_f,.format(full_f))
os.remove(full_f) | Gzips all files in a directory. Note that this is different from
shutil.make_archive, which creates a tar archive. The aim of this method
is to create gzipped files that can still be read using common Unix-style
commands like zless or zcat.
Args:
path (str): Path to directory.
compresslevel (int): Level of compression, 1-9. 9 is default for
GzipFile, 6 is default for gzip. |
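Typical call (the path is illustrative):
# Gzip every non-.gz file in the directory, keeping zcat/zless usable on the results.
gzip_dir("/tmp/calc_outputs", compresslevel=9)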
4,169 | def get_instance(self, payload):
return FaxMediaInstance(self._version, payload, fax_sid=self._solution[], ) | Build an instance of FaxMediaInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance
:rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance |
4,170 | def __analizar_evento(self, ret):
"Comprueba y extrae el wvento informativo si existen en la respuesta XML"
evt = ret.get()
if evt:
self.Eventos = [evt]
self.Evento = "%(codigo)s: %(descripcion)s" % evt | Checks for and extracts the informational event if present in the XML response |
4,171 | def _parse_saved_model(path):
path_to_pb = _get_saved_model_proto_path(path)
file_content = tf_v1.gfile.Open(path_to_pb, "rb").read()
saved_model = saved_model_pb2.SavedModel()
try:
saved_model.ParseFromString(file_content)
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
return saved_model | Reads the savedmodel.pb file containing `SavedModel`. |
4,172 | def check_user(self, todays_facts):
interval = self.conf_notify_interval
if interval <= 0 or interval >= 121:
return
now = dt.datetime.now()
message = None
last_activity = todays_facts[-1] if todays_facts else None
if last_activity and not last_activity[]:
delta = now - last_activity[]
duration = delta.seconds / 60
if duration and duration % interval == 0:
message = _("Working on %s") % last_activity[]
self.notify_user(message)
elif self.conf_notify_on_idle:
if (now.minute + now.hour * 60) % interval == 0:
self.notify_user(_("No activity")) | Check if we need to notify the user |
4,173 | def get_notifications(self, **params):
response = self._get(, , params=params)
return self._make_api_object(response, Notification) | https://developers.coinbase.com/api/v2#list-notifications |
4,174 | def get_queryset(self):
queryset = super(IndexView, self).get_queryset()
search_form = self.get_search_form()
if search_form.is_valid():
query_str = search_form.cleaned_data.get(, ).strip()
queryset = self.model.objects.search(query_str)
return queryset | Returns queryset instance.
:rtype: django.db.models.query.QuerySet. |
4,175 | def extrap_sec(data, dist, depth, w1=1.0, w2=0):
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data | Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolated via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
fd : float
Decay factor [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable |
4,176 | def get_keys_from_ldap(self, username=None):
result_dict = {}
filter = []
if username is not None:
filter.append(.format(username))
attributes = ['uid', 'sshPublicKey']
results = self.client.search(filter, attributes)
for result in results:
result_dict[result.uid.value] = result.sshPublicKey.values
return result_dict | Fetch keys from ldap.
Args:
username Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format |
4,177 | def pack(fmt, *args, **kwargs):
endian, target = kwargs.get(), kwargs.get()
endian = endian if endian is not None else target.endian if target is not None else pwnypack.target.target.endian
if fmt and fmt[0] not in :
if endian is pwnypack.target.Target.Endian.little:
fmt = + fmt
elif endian is pwnypack.target.Target.Endian.big:
fmt = + fmt
else:
raise NotImplementedError( % endian)
return struct.pack(fmt, *args) | pack(fmt, v1, v2, ..., endian=None, target=None)
Return a string containing the values v1, v2, ... packed according to the
given format. The actual packing is performed by ``struct.pack`` but the
byte order will be set according to the given `endian`, `target` or
byte order of the global target.
Args:
fmt(str): The format string.
v1,v2,...: The values to pack.
endian(:class:`~pwnypack.target.Target.Endian`): Override the default
byte order. If ``None``, it will look at the byte order of
the ``target`` argument.
target(:class:`~pwnypack.target.Target`): Override the default byte
order. If ``None``, it will look at the byte order of
the global :data:`~pwnypack.target.target`.
Returns:
bytes: The provided values packed according to the format. |
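A short usage sketch of the byte-order handling described above (uses the Target.Endian enum the code itself references):
import pwnypack.target

data_le = pack('I', 0xdeadbeef, endian=pwnypack.target.Target.Endian.little)
data_be = pack('I', 0xdeadbeef, endian=pwnypack.target.Target.Endian.big)
assert data_le == data_be[::-1]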
4,178 | def set_fingerprint(fullpath, fingerprint=None):
try:
fingerprint = fingerprint or utils.file_fingerprint(fullpath)
record = model.FileFingerprint.get(file_path=fullpath)
if record:
record.set(fingerprint=fingerprint,
file_mtime=os.stat(fullpath).st_mtime)
else:
record = model.FileFingerprint(
file_path=fullpath,
fingerprint=fingerprint,
file_mtime=os.stat(fullpath).st_mtime)
orm.commit()
except FileNotFoundError:
orm.delete(fp for fp in model.FileFingerprint if fp.file_path == fullpath) | Set the last known modification time for a file |
4,179 | def transform(self, vector):
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector) | Applies transformation on a vector or an RDD[Vector].
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be transformed. |
4,180 | def get(self, timeout=None, block=True):
_vv and IOLOG.debug(,
self, timeout, block)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
i = len(self._sleeping)
if len(self._queue) > i:
_vv and IOLOG.debug(, self, self._queue[i])
return self._queue.pop(i)
if not block:
raise TimeoutError()
rsock, wsock = self._get_socketpair()
cookie = self._make_cookie()
self._sleeping.append((wsock, cookie))
finally:
self._lock.release()
poller = self.poller_class()
poller.start_receive(rsock.fileno())
try:
return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
finally:
poller.close() | Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object. |
4,181 | def Update(self, attribute=None):
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
flow_id = flow.StartAFF4Flow(
client_id=client_id,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
notify_to_user=False,
token=self.token)
return flow_id | Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes: CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow. |
4,182 | def uniform_spacings(N):
z = np.cumsum(-np.log(random.rand(N + 1)))
return z[:-1] / z[-1] | Generate ordered uniform variates in O(N) time.
Parameters
----------
N: int (>0)
the expected number of uniform variates
Returns
-------
(N,) float ndarray
the N ordered variates (ascending order)
Note
----
This is equivalent to::
from numpy import random
u = sort(random.rand(N))
but the line above has complexity O(N*log(N)), whereas the algorithm
used here has complexity O(N). |
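A quick sanity check of the properties claimed above (illustrative only):
import numpy as np

u = uniform_spacings(5)
assert u.shape == (5,)
assert np.all(np.diff(u) >= 0.)     # ascending order
assert np.all((u > 0.) & (u < 1.))  # valid uniform variates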
4,183 | def running_apps(device_id):
if not is_valid_device_id(device_id):
abort(403)
if device_id not in devices:
abort(404)
return jsonify(running_apps=devices[device_id].running_apps) | Get running apps via HTTP GET. |
4,184 | def f_remove_child(self, name, recursive=False, predicate=None):
if name not in self._children:
            # Message text reconstructed from the docstring's ValueError condition.
            raise ValueError('Your group `%s` does not contain the child `%s`.' %
                             (self.v_full_name, name))
else:
child = self._children[name]
if (name not in self._links and
not child.v_is_leaf and
child.f_has_children() and
not recursive):
                # Message text reconstructed from the docstring's TypeError condition.
                raise TypeError('Cannot remove child `%s`: it is a group with children. '
                                'Use `recursive=True` to remove the whole subtree.' % name)
else:
self._nn_interface._remove_subtree(self, name, predicate) | Removes a child of the group.
Note that groups and leaves are only removed from the current trajectory in RAM.
If the trajectory is stored to disk, this data is not affected. Thus, removing children
can only be used to free RAM memory!
If you want to free memory on disk via your storage service,
use :func:`~pypet.trajectory.Trajectory.f_delete_items` of your trajectory.
:param name:
Name of child, naming by grouping is NOT allowed ('groupA.groupB.childC'),
child must be direct successor of current node.
:param recursive:
Must be true if child is a group that has children. Will remove
the whole subtree in this case. Otherwise a TypeError is thrown.
:param predicate:
Predicate which can evaluate for each node to ``True`` in order to remove the node or
``False`` if the node should be kept. Leave ``None`` if you want to remove all nodes.
:raises:
TypeError if recursive is false but there are children below the node.
ValueError if child does not exist. |
4,185 | def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
if y is None:
if audio_f is None:
            raise ParameterError('At least one of `y` or `audio_f` must be provided')
y, sr = librosa.load(audio_f, sr=sr, mono=True)
if sr is None:
        raise ParameterError('If audio is provided as `y`, you must also provide '
                             'the sampling rate `sr`')
if jam is None:
jam = jams.JAMS()
jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)
if not isinstance(jam, jams.JAMS):
jam = jams.load(jam)
data = dict()
for operator in self.ops:
if isinstance(operator, BaseTaskTransformer):
data.update(operator.transform(jam))
elif isinstance(operator, FeatureExtractor):
data.update(operator.transform(y, sr))
if crop:
data = self.crop(data)
return data | Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided. |
4,186 | def type_errors(self, context=None):
try:
results = self._infer_augassign(context=context)
return [
result
for result in results
if isinstance(result, util.BadBinaryOperationMessage)
]
except exceptions.InferenceError:
return [] | Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage) |
4,187 | def __get_ml_configuration_status(self, job_id):
failure_message = "Get status on ml configuration failed"
    # NOTE: the endpoint path and response key were lost in extraction; the
    # values below are placeholders, not necessarily the client's real route.
    response = self._get_success_json(self._get(
        'ml-configurations/' + job_id + '/status',  # hypothetical path
        None, failure_message=failure_message))['data']
return response | After invoking the create_ml_configuration async method, you can use this method to
check on the status of the builder job.
:param job_id: The identifier returned from create_ml_configuration
:return: Job status |
4,188 | def add(self, pattern):
def wrap(f):
self.functions.append((f, pattern))
return f
return wrap | Decorator to add new dispatch functions. |
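A small usage sketch of the decorator (the `Dispatcher` owner class and the pattern are hypothetical):
dispatcher = Dispatcher()  # hypothetical class exposing `add`

@dispatcher.add(r'^ping$')
def handle_ping(message):
    return 'pong'

# handle_ping is now registered in dispatcher.functions together with its pattern.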
4,189 | def pool_delete(storage_pool, logger):
    # XPath reconstructed: the pool's on-disk location lives under <pool><target><path> in libvirt's XML.
    path = etree.fromstring(storage_pool.XMLDesc(0)).find('.//path').text
volumes_delete(storage_pool, logger)
try:
storage_pool.destroy()
except libvirt.libvirtError:
logger.exception("Unable to delete storage pool.")
try:
if os.path.exists(path):
shutil.rmtree(path)
except EnvironmentError:
logger.exception("Unable to delete storage pool folder.") | Storage Pool deletion, removes all the created disk images within the pool and the pool itself. |
4,190 | def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {: floatingip_id,
: status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
known_router_fips = self._l3plugin.get_floatingips(
                context, {'router_id': [router_id]})
            fips_to_disable = (fip['id'] for fip in known_router_fips
                               if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN) | Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param fip_statuses: dict with floatingip_id as key and status as value |
4,191 | def _get_centered_z1pt0(self, sites):
mean_z1pt0 = (-7.15 / 4.) * np.log(((sites.vs30) ** 4. + 570.94 ** 4.)
/ (1360 ** 4. + 570.94 ** 4.))
centered_z1pt0 = sites.z1pt0 - np.exp(mean_z1pt0)
        return centered_z1pt0 | Get z1pt0 centered on the Vs30-dependent average z1pt0 (m) for
California and non-Japan regions |
4,192 | def finalize_canonical_averages(
number_of_nodes, ps, canonical_averages, alpha,
):
    # Field names reconstructed from the structured-array layout used elsewhere in the module.
    spanning_cluster = (
        (
            'spanning_cluster_mean' in
            canonical_averages.dtype.names
        ) and
        'spanning_cluster_m2' in canonical_averages.dtype.names
    )
ret = np.empty_like(
canonical_averages,
dtype=finalized_canonical_averages_dtype(
spanning_cluster=spanning_cluster
),
)
    n = canonical_averages['number_of_runs']
    sqrt_n = np.sqrt(canonical_averages['number_of_runs'])
    ret['number_of_runs'] = n
    ret['p'] = ps
    ret['alpha'] = alpha
def _transform(
original_key, final_key=None, normalize=False, transpose=False,
):
if final_key is None:
final_key = original_key
        keys_mean = [
            '{}_mean'.format(key)
            for key in [original_key, final_key]
        ]
        keys_std = [
            '{}_m2'.format(original_key),
            '{}_std'.format(final_key),
        ]
        key_ci = '{}_ci'.format(final_key)
ret[keys_mean[1]] = canonical_averages[keys_mean[0]]
if normalize:
ret[keys_mean[1]] /= number_of_nodes
array = canonical_averages[keys_std[0]]
result = np.sqrt(
(array.T if transpose else array) / (n - 1)
)
ret[keys_std[1]] = (
result.T if transpose else result
)
if normalize:
ret[keys_std[1]] /= number_of_nodes
array = ret[keys_std[1]]
scale = (array.T if transpose else array) / sqrt_n
array = ret[keys_mean[1]]
mean = (array.T if transpose else array)
result = scipy.stats.t.interval(
1 - alpha,
df=n - 1,
loc=mean,
scale=scale,
)
(
ret[key_ci][..., 0], ret[key_ci][..., 1]
) = ([my_array.T for my_array in result] if transpose else result)
    if spanning_cluster:
        _transform('spanning_cluster')
    _transform('max_cluster_size', 'percolation_strength', normalize=True)
    _transform('moments', normalize=True, transpose=True)
return ret | Finalize canonical averages |
4,193 | def extras_msg(extras):
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in extras), verb | Create an error message for extra items or properties. |
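The return value pairs the formatted items with the matching verb, for example:
extras_msg(['age'])           # ("'age'", 'was')
extras_msg(['age', 'name'])   # ("'age', 'name'", 'were')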
4,194 | def check_token_payment(name, token_price, stacks_payment_info):
    token_units = stacks_payment_info['token_units']
    tokens_paid = stacks_payment_info['tokens_paid']
    tokens_paid = int(tokens_paid)
    if token_units != TOKEN_TYPE_STACKS:
        # Warning text reconstructed; the original message was lost in extraction.
        log.warning('Payment was made in units of {}, but only {} is accepted'.format(token_units, TOKEN_TYPE_STACKS))
        return {'status': False}
    if tokens_paid < token_price:
        log.warning("Name buyer paid {} {}s, but {} costs {} units of {}s".format(tokens_paid, token_units, name, token_price, token_units))
        return {'status': False}
    return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units} | Check a token payment was enough and was of the right type
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not |
4,195 | def get(self, names, country_id=None, language_id=None, retheader=False):
responses = [
self._get_chunk(name_chunk, country_id, language_id)
for name_chunk
in _chunked(names, Genderize.BATCH_SIZE)
]
data = list(chain.from_iterable(
response.data for response in responses
))
if retheader:
return {
"data": data,
"headers": responses[-1].headers,
}
else:
return data | Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional
:type retheader: Optional[boolean]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code. |
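A minimal usage sketch (requires network access; the printed fields are illustrative, not real API output):
from genderize import Genderize

client = Genderize()
for entry in client.get(['Ada', 'Grace']):
    print(entry['name'], entry.get('gender'), entry.get('probability'))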
4,196 | def presnyields(self, *cycles, **keyw):
abund_list = []
xm_list = []
if ("xm" in keyw) == False:
keyw["xm"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("mrem" in keyw) == False:
mrem = 0.
else:
mrem = keyw["mrem"]
cylen = len(cycles)
if cylen > 2:
cylen = 2
for i in range(cylen):
cycle = cycles[i]
        abund_list.append(self.se.get(cycle, keyw['abund']))
        xm_list.append(self.se.get(cycle, keyw['xm']))
isoX = abund_list[0]
xm = xm_list[0]
if cylen == 2:
isoXini = abund_list[1]
niso = len(isoX[0,:])
nshells = len(xm)
X_i = np.zeros([niso], float)
ybound = np.zeros([niso], float)
xarray = np.zeros([nshells+1], float)
yarray = np.zeros([nshells+1], float)
for k in range(nshells):
k1 = k
if mrem<=xm[k]:
break
for i in range(niso):
if k1>=1:
if isoX[k1-1,i]!=0.0:
m=old_div((isoX[k1,i]-isoX[k1-1,i]),(xm[k1]-xm[k1-1]))
ybound[i] = isoX[k1-1,i] +m*(mrem-xm[k1-1])
else:
ybound[i]=1.e-99
if k1==0:
if isoX[k1,i]!=0.0:
ybound[i]=isoX[k1,i]
else:
ybound[i]=1.e-99
xarray[0] = mrem
xarray[1:nshells-k1+1] = xm[k1:nshells]
for i in range(niso):
yarray[0] = ybound[i]
for j in range(nshells-k1):
if isoX[k1+j,i] != 0.0:
yarray[j+1] = isoX[k1+j,i]
else:
yarray[j+1] = 1.e-99
if cylen == 1:
for j in range(nshells-k1):
X_i[i] = X_i[i] + ((0.5*(yarray[j+1] + yarray[j])) * \
(xarray[j+1] - xarray[j]))
elif cylen == 2:
for j in range(nshells-k1):
X_i[i] = X_i[i] + ((0.5*(yarray[j+1] + yarray[j]) - isoXini[-1,i]) * \
(xarray[j+1] - xarray[j]))
return X_i | This function calculates the presupernova yields of a full
structure profile from a remnant mass, mrem, to the surface.
Parameters
----------
cycles : variadic tuple
cycle[0] is the cycle to perform the presupernova yields
calculations on. If cycle[1] is also specified, the yields
are outputted using 'initial' abundances from cycle[1],
otherwise the ejected masses are outputted.
keyw : dict
A dict of key word arguments.
Notes
-----
The following keywords can be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| xm | "mass" |
+------------------+---------------+
| mrem | 0 |
+------------------+---------------+
abund and xm are used when the variables within the input file
differ in their names. The default values are set to the
output typically found in an MPPNP output file. For example,
if the table for the abundances is called "abundances" instead
of the default value, use abund = "abundances" as a keyword
argument.
mrem is specified using a keyword argument and tells the program
where to begin integrating. |
4,197 | def add_instance(model, _commit=True, **kwargs):
try:
model = get_model(model)
except ImportError:
return None
instance = model(**kwargs)
db.session.add(instance)
try:
if _commit:
db.session.commit()
else:
db.session.flush()
return instance.id
except IntegrityError:
db.session.rollback()
return | Add instance to database.
:param model: a string, model name in rio.models
:param _commit: control whether commit data to database or not. Default True.
:param \*\*kwargs: persisted data.
:return: instance id. |
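A brief usage sketch (the 'user' model name and its fields are hypothetical):
new_id = add_instance('user', username='alice', email='alice@example.org')
if new_id is None:
    print('model not found or insert violated a constraint')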
4,198 | def addSuffixToExtensions(toc):
new_toc = TOC()
for inm, fnm, typ in toc:
        if typ in ('EXTENSION', 'DEPENDENCY'):
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc | Returns a new TOC with proper library suffix for EXTENSION items. |
4,199 | def add(self, command_template, job_class):
job = JobTemplate(command_template.alias,
command_template=command_template,
depends_on=command_template.depends_on, queue=self.queue,
job_class=job_class)
self.queue.push(job) | Given a command template, add it as a job to the queue. |