code | docstring
---|---|
def function(fname):
"""
Wrap a plain function in a Function subclass.
"""
def _f(func):
class WrapFunction(Function):
name = fname
def __call__(self, *args, **kwargs):
return func(*args, **kwargs)
return WrapFunction
return _f | Wrap a plain function in a Function subclass. |
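A minimal usage sketch for the decorator above, assuming a `Function` base class that can be instantiated with no arguments is in scope; the 'sqrt' name and math.sqrt body are made up:

import math

# Hypothetical usage: expose math.sqrt as a Function subclass named 'sqrt'.
@function('sqrt')
def _sqrt(x):
    return math.sqrt(x)

# The decorator returns the WrapFunction class itself, not an instance.
sqrt_fn = _sqrt()
print(sqrt_fn(9))  # 3.0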
def _requirement_element(self, parent_element, req_data):
"""Adds requirement XML element."""
req_data = self._transform_result(req_data)
if not req_data:
return
title = req_data.get("title")
if not title:
logger.warning("Skipping requirement, title is missing")
return
req_id = req_data.get("id")
if not self._check_lookup_prop(req_id):
logger.warning(
"Skipping requirement `%s`, data missing for selected lookup method", title
)
return
attrs, custom_fields = self._classify_data(req_data)
attrs, custom_fields = self._fill_defaults(attrs, custom_fields)
# For testing purposes, the order of fields in resulting XML
# needs to be always the same.
attrs = OrderedDict(sorted(attrs.items()))
custom_fields = OrderedDict(sorted(custom_fields.items()))
requirement = etree.SubElement(parent_element, "requirement", attrs)
title_el = etree.SubElement(requirement, "title")
title_el.text = title
description = req_data.get("description")
if description:
description_el = etree.SubElement(requirement, "description")
description_el.text = description
self._fill_custom_fields(requirement, custom_fields) | Adds requirement XML element. |
def sort_direction(self):
"""
Return the direction in which the linked table is sorted by
this column ("asc" or "desc"), or None if this column is unsorted.
"""
if self.table._meta.order_by == self.name:
return "asc"
elif self.table._meta.order_by == ("-" + self.name):
return "desc"
else:
return None | Return the direction in which the linked table is sorted by
this column ("asc" or "desc"), or None if this column is unsorted. |
def safe_compare_digest(val1, val2):
"""safe_compare_digest method.
:param val1: string or bytes for compare
:type val1: str | bytes
:param val2: string or bytes for compare
:type val2: str | bytes
"""
if len(val1) != len(val2):
return False
result = 0
if PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for i, j in zip(val1, val2):
result |= i ^ j
else:
for i, j in zip(val1, val2):
result |= (ord(i) ^ ord(j))
return result == 0 | Compare two string or bytes values in a timing-safe manner.
:param val1: string or bytes to compare
:type val1: str | bytes
:param val2: string or bytes to compare
:type val2: str | bytes |
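For reference, the standard library offers the same timing-safe comparison; a minimal sketch (key and payload values are made up):

import hmac

# Compute an expected signature and compare it to a caller-supplied value
# without short-circuiting on the first differing character.
expected = hmac.new(b"secret-key", b"payload", "sha256").hexdigest()
received = expected  # in practice this comes from the request
print(hmac.compare_digest(expected, received))  # True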
def sort_schemas(schemas):
"""Sort a list of SQL schemas in order"""
def keyfun(v):
x = SQL_SCHEMA_REGEXP.match(v).groups()
# x[3]: 'DEV' should come before ''
return (int(x[0]), x[1], int(x[2]) if x[2] else None,
x[3] if x[3] else 'zzz', int(x[4]))
return sorted(schemas, key=keyfun) | Sort a list of SQL schemas in order |
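The actual SQL_SCHEMA_REGEXP is not shown in this snippet; the sketch below uses a hypothetical pattern with the same five capture groups purely to illustrate how keyfun turns a schema name into a sortable tuple (a tagged schema such as 'DEV' sorts before an untagged one):

import re

# Hypothetical pattern: <major>_<name>[_<rev>][_<TAG>]_<seq>, e.g. "1_core_2_DEV_003".
SQL_SCHEMA_REGEXP = re.compile(r'^(\d+)_([a-z]+)(?:_(\d+))?(?:_([A-Z]+))?_(\d+)$')

def keyfun(v):
    x = SQL_SCHEMA_REGEXP.match(v).groups()
    # x[3]: 'DEV' maps to itself, a missing tag maps to 'zzz' and sorts last.
    return (int(x[0]), x[1], int(x[2]) if x[2] else None,
            x[3] if x[3] else 'zzz', int(x[4]))

print(sorted(['1_core_2_004', '1_core_2_DEV_003', '1_core_1_002'], key=keyfun))
# ['1_core_1_002', '1_core_2_DEV_003', '1_core_2_004']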
def check_model(self, max_paths=1, max_path_length=5):
"""Check all the statements added to the ModelChecker.
Parameters
----------
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
Returns
-------
list of (Statement, PathResult)
Each tuple contains the Statement checked against the model and
a PathResult object describing the results of model checking.
"""
results = []
for stmt in self.statements:
result = self.check_statement(stmt, max_paths, max_path_length)
results.append((stmt, result))
return results | Check all the statements added to the ModelChecker.
Parameters
----------
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
Returns
-------
list of (Statement, PathResult)
Each tuple contains the Statement checked against the model and
a PathResult object describing the results of model checking. |
def update_serviceprofile(self, host_id, vlan_id):
"""Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programmed on a server's
ethernet ports and the Fabric Interconnect's network ports.
"""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
if service_profile:
LOG.debug('UCS Manager network driver Service Profile : %s',
service_profile)
else:
LOG.info('UCS Manager network driver does not support '
'Host_id %s', host_id)
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', str(vlan_id))
return False
# Update Service Profile
if not self._update_service_profile(handle,
service_profile,
vlan_id,
ucsm_ip):
LOG.error('UCS Manager network driver failed to update '
'Service Profile %(service_profile)s in UCSM '
'%(ucsm_ip)s',
{'service_profile': service_profile, 'ucsm_ip': ucsm_ip})
return False
return True | Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programmed on a server's
ethernet ports and the Fabric Interconnect's network ports. |
def _init_map(self):
"""stub"""
QuestionFilesFormRecord._init_map(self)
FirstAngleProjectionFormRecord._init_map(self)
super(MultiChoiceOrthoQuestionFormRecord, self)._init_map() | stub |
def _loadf(ins):
""" Loads a floating point value from a memory address.
If the 2nd arg. starts with '*', it is always treated as
an indirect value.
"""
output = _float_oper(ins.quad[2])
output.extend(_fpush())
return output | Loads a floating point value from a memory address.
If the 2nd arg. starts with '*', it is always treated as
an indirect value. |
def values_update(self, range, params=None, body=None):
"""Lower-level method that directly calls `spreadsheets.values.update <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update>`_.
:param str range: The `A1 notation <https://developers.google.com/sheets/api/guides/concepts#a1_notation>`_ of the values to update.
:param dict params: (optional) `Query parameters <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#query-parameters>`_.
:param dict body: (optional) `Request body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#request-body>`_.
:returns: `Response body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#response-body>`_.
:rtype: dict
Example::
sh.values_update(
'Sheet1!A2',
params={
'valueInputOption': 'USER_ENTERED'
},
body={
'values': [[1, 2, 3]]
}
)
.. versionadded:: 3.0
"""
url = SPREADSHEET_VALUES_URL % (self.id, quote(range))
r = self.client.request('put', url, params=params, json=body)
return r.json() | Lower-level method that directly calls `spreadsheets.values.update <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update>`_.
:param str range: The `A1 notation <https://developers.google.com/sheets/api/guides/concepts#a1_notation>`_ of the values to update.
:param dict params: (optional) `Query parameters <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#query-parameters>`_.
:param dict body: (optional) `Request body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#request-body>`_.
:returns: `Response body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update#response-body>`_.
:rtype: dict
Example::
sh.values_update(
'Sheet1!A2',
params={
'valueInputOption': 'USER_ENTERED'
},
body={
'values': [[1, 2, 3]]
}
)
.. versionadded:: 3.0 |
def _build_file_writer(cls, session: AppSession):
'''Create the File Writer.
Returns:
FileWriter: An instance of :class:`.writer.BaseFileWriter`.
'''
args = session.args
if args.delete_after:
return session.factory.new('FileWriter') # is a NullWriter
elif args.output_document:
session.factory.class_map['FileWriter'] = SingleDocumentWriter
return session.factory.new('FileWriter', args.output_document,
headers_included=args.save_headers)
use_dir = (len(args.urls) != 1 or args.page_requisites
or args.recursive)
if args.use_directories == 'force':
use_dir = True
elif args.use_directories == 'no':
use_dir = False
os_type = 'windows' if 'windows' in args.restrict_file_names \
else 'unix'
ascii_only = 'ascii' in args.restrict_file_names
no_control = 'nocontrol' not in args.restrict_file_names
if 'lower' in args.restrict_file_names:
case = 'lower'
elif 'upper' in args.restrict_file_names:
case = 'upper'
else:
case = None
path_namer = session.factory.new(
'PathNamer',
args.directory_prefix,
index=args.default_page,
use_dir=use_dir,
cut=args.cut_dirs,
protocol=args.protocol_directories,
hostname=args.host_directories,
os_type=os_type,
ascii_only=ascii_only,
no_control=no_control,
case=case,
max_filename_length=args.max_filename_length,
)
if args.recursive or args.page_requisites or args.continue_download:
if args.clobber_method == 'disable':
file_class = OverwriteFileWriter
else:
file_class = IgnoreFileWriter
elif args.timestamping:
file_class = TimestampingFileWriter
else:
file_class = AntiClobberFileWriter
session.factory.class_map['FileWriter'] = file_class
return session.factory.new(
'FileWriter',
path_namer,
file_continuing=args.continue_download,
headers_included=args.save_headers,
local_timestamping=args.use_server_timestamps,
adjust_extension=args.adjust_extension,
content_disposition=args.content_disposition,
trust_server_names=args.trust_server_names,
) | Create the File Writer.
Returns:
FileWriter: An instance of :class:`.writer.BaseFileWriter`. |
def _control(self, state):
""" Control device state.
Possible states are ON or OFF.
:param state: Switch to this state.
"""
# Renew subscription if necessary
if not self._subscription_is_recent():
self._subscribe()
cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
_LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
ack_state = self._udp_transact(cmd, self._control_resp, state)
if ack_state is None:
raise S20Exception(
"Device didn't acknowledge control request: {}".format(
self.host)) | Control device state.
Possible states are ON or OFF.
:param state: Switch to this state. |
def assert_valid_rule_class(clazz):
"""
Asserts that a given rule clazz is valid by checking a number of its properties:
- Rules must extend from LineRule or CommitRule
- Rule classes must have id and name string attributes.
The options_spec is optional, but if set, it must be a list of gitlint Options.
- Rule classes must have a validate method. In case of a CommitRule, validate must take a single commit parameter.
In case of LineRule, validate must take line and commit as first and second parameters.
- LineRule classes must have a target class attributes that is set to either
CommitMessageTitle or CommitMessageBody.
- User Rule id's cannot start with R, T, B or M as these rule ids are reserved for gitlint itself.
"""
# Rules must extend from LineRule or CommitRule
if not (issubclass(clazz, rules.LineRule) or issubclass(clazz, rules.CommitRule)):
msg = u"User-defined rule class '{0}' must extend from {1}.{2} or {1}.{3}"
raise UserRuleError(msg.format(clazz.__name__, rules.CommitRule.__module__,
rules.LineRule.__name__, rules.CommitRule.__name__))
# Rules must have an id attribute
if not hasattr(clazz, 'id') or clazz.id is None or not clazz.id:
raise UserRuleError(u"User-defined rule class '{0}' must have an 'id' attribute".format(clazz.__name__))
# Rule id's cannot start with gitlint reserved letters
if clazz.id[0].upper() in ['R', 'T', 'B', 'M']:
msg = u"The id '{1}' of '{0}' is invalid. Gitlint reserves ids starting with R,T,B,M"
raise UserRuleError(msg.format(clazz.__name__, clazz.id[0]))
# Rules must have a name attribute
if not hasattr(clazz, 'name') or clazz.name is None or not clazz.name:
raise UserRuleError(u"User-defined rule class '{0}' must have a 'name' attribute".format(clazz.__name__))
# if set, options_spec must be a list of RuleOption
if not isinstance(clazz.options_spec, list):
msg = u"The options_spec attribute of user-defined rule class '{0}' must be a list of {1}.{2}"
raise UserRuleError(msg.format(clazz.__name__, options.RuleOption.__module__, options.RuleOption.__name__))
# check that all items in options_spec are actual gitlint options
for option in clazz.options_spec:
if not isinstance(option, options.RuleOption):
msg = u"The options_spec attribute of user-defined rule class '{0}' must be a list of {1}.{2}"
raise UserRuleError(msg.format(clazz.__name__, options.RuleOption.__module__, options.RuleOption.__name__))
# Rules must have a validate method. We use isroutine() as it's both python 2 and 3 compatible.
# For more info see http://stackoverflow.com/a/17019998/381010
if not hasattr(clazz, 'validate') or not inspect.isroutine(clazz.validate):
raise UserRuleError(u"User-defined rule class '{0}' must have a 'validate' method".format(clazz.__name__))
# LineRules must have a valid target: rules.CommitMessageTitle or rules.CommitMessageBody
if issubclass(clazz, rules.LineRule):
if clazz.target not in [rules.CommitMessageTitle, rules.CommitMessageBody]:
msg = u"The target attribute of the user-defined LineRule class '{0}' must be either {1}.{2} or {1}.{3}"
msg = msg.format(clazz.__name__, rules.CommitMessageTitle.__module__,
rules.CommitMessageTitle.__name__, rules.CommitMessageBody.__name__)
raise UserRuleError(msg) | Asserts that a given rule clazz is valid by checking a number of its properties:
- Rules must extend from LineRule or CommitRule
- Rule classes must have id and name string attributes.
The options_spec is optional, but if set, it must be a list of gitlint Options.
- Rule classes must have a validate method. In case of a CommitRule, validate must take a single commit parameter.
In case of LineRule, validate must take line and commit as first and second parameters.
- LineRule classes must have a target class attributes that is set to either
CommitMessageTitle or CommitMessageBody.
- User Rule id's cannot start with R, T, B or M as these rule ids are reserved for gitlint itself. |
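A sketch of a user-defined rule that passes these checks, following gitlint's documented extension points; the rule id, name, and the Release-Note convention are made up:

from gitlint.rules import CommitRule, RuleViolation

class BodyRequiresReleaseNote(CommitRule):
    """Example user rule: the commit body must contain a 'Release-Note:' line."""
    # User rule ids must not start with R, T, B or M (reserved by gitlint).
    id = "UC1"
    name = "body-requires-release-note"

    def validate(self, commit):
        for line in commit.message.body:
            if line.startswith("Release-Note:"):
                return []
        return [RuleViolation(self.id, "Commit body is missing a 'Release-Note:' line")]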
def p_values(self, p):
"""values :
| values value VALUE_SEPARATOR
| values value"""
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1] | values :
| values value VALUE_SEPARATOR
| values value |
def generate_one(self):
"""Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True
"""
# Get the weights for all items in the domain
weights = [self.probability_func(self.generated[element])
for element in self.domain]
# Sample from the domain using the weights
element = random.choices(self.domain, weights=weights)[0]
# Update the generated values and return
self.generated[element] += 1
return element | Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True |
def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath):
"""Writes a .git file containing a (preferably) relative path to the actual git module repository.
It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir
:note: will overwrite existing files !
:note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration
and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed
if it becomes one
:param working_tree_dir: directory to write the .git file into
:param module_abspath: absolute path to the bare repository
"""
git_file = osp.join(working_tree_dir, '.git')
rela_path = osp.relpath(module_abspath, start=working_tree_dir)
if is_win:
if osp.isfile(git_file):
os.remove(git_file)
with open(git_file, 'wb') as fp:
fp.write(("gitdir: %s" % rela_path).encode(defenc))
with GitConfigParser(osp.join(module_abspath, 'config'),
read_only=False, merge_includes=False) as writer:
writer.set_value('core', 'worktree',
to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath))) | Writes a .git file containing a (preferably) relative path to the actual git module repository.
It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir
:note: will overwrite existing files !
:note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration
and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed
if it becomes one
:param working_tree_dir: directory to write the .git file into
:param module_abspath: absolute path to the bare repository |
def uninstall(self):
'''
Uninstall the module finder. If not installed, this will do nothing.
After uninstallation, none of the newly loaded modules will be
decorated (that is, everything will be back to normal).
'''
if self.installed:
sys.meta_path.remove(self)
# Reload all decorated items
import_list = []
for name in self.__loaded_modules:
del sys.modules[name]
import_list.append(name)
for name in import_list:
__import__(name)
self.__reset() | Uninstall the module finder. If not installed, this will do nothing.
After uninstallation, none of the newly loaded modules will be
decorated (that is, everything will be back to normal). |
def start(self):
"""Start the controller."""
if self.mode == "manual":
return
if self.ipython_dir != '~/.ipython':
self.ipython_dir = os.path.abspath(os.path.expanduser(self.ipython_dir))
if self.log:
stdout = open(os.path.join(self.ipython_dir, "{0}.controller.out".format(self.profile)), 'w')
stderr = open(os.path.join(self.ipython_dir, "{0}.controller.err".format(self.profile)), 'w')
else:
stdout = open(os.devnull, 'w')
stderr = open(os.devnull, 'w')
try:
opts = [
'ipcontroller',
'' if self.ipython_dir == '~/.ipython' else '--ipython-dir={}'.format(self.ipython_dir),
self.interfaces if self.interfaces is not None else '--ip=*',
'' if self.profile == 'default' else '--profile={0}'.format(self.profile),
'--reuse' if self.reuse else '',
'--location={}'.format(self.public_ip) if self.public_ip else '',
'--port={}'.format(self.port) if self.port is not None else ''
]
if self.port_range is not None:
opts += [
'--HubFactory.hb={0},{1}'.format(self.hb_ping, self.hb_pong),
'--HubFactory.control={0},{1}'.format(self.control_client, self.control_engine),
'--HubFactory.mux={0},{1}'.format(self.mux_client, self.mux_engine),
'--HubFactory.task={0},{1}'.format(self.task_client, self.task_engine)
]
logger.debug("Starting ipcontroller with '{}'".format(' '.join([str(x) for x in opts])))
self.proc = subprocess.Popen(opts, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
except FileNotFoundError:
msg = "Could not find ipcontroller. Please make sure that ipyparallel is installed and available in your env"
logger.error(msg)
raise ControllerError(msg)
except Exception as e:
msg = "IPPController failed to start: {0}".format(e)
logger.error(msg)
raise ControllerError(msg) | Start the controller. |
def host_info_getter(func, name=None):
"""
The decorated function is added to the process of collecting the host_info.
This just adds the decorated function to the global
``sacred.host_info.host_info_gatherers`` dictionary.
The functions from that dictionary are used when collecting the host info
using :py:func:`~sacred.host_info.get_host_info`.
Parameters
----------
func : callable
A function that can be called without arguments and returns some
json-serializable information.
name : str, optional
The name of the corresponding entry in host_info.
Defaults to the name of the function.
Returns
-------
The function itself.
"""
name = name or func.__name__
host_info_gatherers[name] = func
return func | The decorated function is added to the process of collecting the host_info.
This just adds the decorated function to the global
``sacred.host_info.host_info_gatherers`` dictionary.
The functions from that dictionary are used when collecting the host info
using :py:func:`~sacred.host_info.get_host_info`.
Parameters
----------
func : callable
A function that can be called without arguments and returns some
json-serializable information.
name : str, optional
The name of the corresponding entry in host_info.
Defaults to the name of the function.
Returns
-------
The function itself. |
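A minimal usage sketch, assuming the host_info_getter decorator above is in scope; the cpu_count entry is made up:

import os

@host_info_getter
def cpu_count():
    # Registered in host_info_gatherers under the key 'cpu_count' and called
    # with no arguments whenever host info is collected.
    return os.cpu_count()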
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
filter_collection = getattr(
configuration_object, '_filter_collection', None)
if not filter_collection:
raise errors.BadConfigObject(
'Filter collection missing from configuration object')
date_filters = getattr(options, 'date_filters', None)
if not date_filters:
return
file_entry_filter = file_entry_filters.DateTimeFileEntryFilter()
for date_filter in date_filters:
date_filter_pieces = date_filter.split(',')
if len(date_filter_pieces) != 3:
raise errors.BadConfigOption(
'Badly formed date filter: {0:s}'.format(date_filter))
time_value, start_time_string, end_time_string = date_filter_pieces
time_value = time_value.strip()
start_time_string = start_time_string.strip()
end_time_string = end_time_string.strip()
try:
file_entry_filter.AddDateTimeRange(
time_value, start_time_string=start_time_string,
end_time_string=end_time_string)
except ValueError:
raise errors.BadConfigOption(
'Badly formed date filter: {0:s}'.format(date_filter))
filter_collection.AddFilter(file_entry_filter) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. |
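A sketch of the options shape this helper consumes; each date filter string is split on commas into <time_value>,<start>,<end> exactly as the code above does, and the concrete values are made up:

import argparse

options = argparse.Namespace(
    date_filters=['mtime,2020-01-01 00:00:00,2020-12-31 23:59:59'])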
def parse_timezone(matches, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if matches["timezone"] == "Z":
return UTC
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description) | Parses ISO 8601 time zone specs into tzinfo offsets |
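A usage sketch, assuming the helpers referenced above (UTC, to_int, FixedOffset) are importable; the matches mapping mimics what the ISO 8601 regular expression would capture for a "+05:30" offset:

matches = {"timezone": "+05:30", "tz_sign": "+", "tz_hour": "05", "tz_minute": "30"}
tz = parse_timezone(matches)  # FixedOffset of +5 hours 30 minutes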
def description(self, request, id, description):
"""Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description
"""
request.data = json.dumps({
"description": description
})
return self.send(request, id).json()['html_url'] | Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description |
def size(self, filename: str) -> int:
'''Get size of file.
Coroutine.
'''
yield from self._control_stream.write_command(Command('SIZE', filename))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('File size', ReplyCodes.file_status, reply)
try:
return int(reply.text.strip())
except ValueError:
return | Get size of file.
Coroutine. |
def create(self):
"""
Calls various methods sequentially in order to fully build the
database.
"""
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | Calls various methods sequentially in order to fully build the
database. |
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('id')
yield 'username', data.get('username')
yield 'discriminator', data.get('discriminator')
yield 'picture', "https://cdn.discordapp.com/avatars/{}/{}.png".format(
data.get('id'), data.get('avatar')) | Parse information from the provider. |
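Because the function is a generator of (key, value) pairs, callers typically collapse it with dict(); a sketch with made-up profile data:

raw = {"id": "80351110224678912", "username": "Nelly",
       "discriminator": "1337", "avatar": "8342729096ea3675442027381ff50dfe"}
profile = dict(user_parse(raw))
# profile['picture'] ends in '.../80351110224678912/8342729096ea3675442027381ff50dfe.png'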
def lemke_howson(g, init_pivot=0, max_iter=10**6, capping=None,
full_output=False):
"""
Find one mixed-action Nash equilibrium of a 2-player normal form
game by the Lemke-Howson algorithm [2]_, implemented with
"complementary pivoting" (see, e.g., von Stengel [3]_ for details).
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
init_pivot : scalar(int), optional(default=0)
Initial pivot, an integer k such that 0 <= k < m+n, where
integers 0, ..., m-1 and m, ..., m+n-1 correspond to the actions
of players 0 and 1, respectively.
max_iter : scalar(int), optional(default=10**6)
Maximum number of pivoting steps.
capping : scalar(int), optional(default=None)
If supplied, the routine is executed with the heuristics
proposed by Codenotti et al. [1]_; see Notes below for details.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following game from von Stengel [3]_:
>>> np.set_printoptions(precision=4) # Reduce the digits printed
>>> bimatrix = [[(3, 3), (3, 2)],
... [(2, 2), (5, 6)],
... [(0, 3), (6, 1)]]
>>> g = NormalFormGame(bimatrix)
Obtain a Nash equilibrium of this game by `lemke_howson` with player
0's action 1 (out of the three actions 0, 1, and 2) as the initial
pivot:
>>> lemke_howson(g, init_pivot=1)
(array([ 0. , 0.3333, 0.6667]), array([ 0.3333, 0.6667]))
>>> g.is_nash(_)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = lemke_howson(g, init_pivot=1, full_output=True)
>>> res.converged # Whether the routine has converged
True
>>> res.num_iter # Number of pivoting steps performed
4
Notes
-----
* This routine is implemented with floating point arithmetic and
thus is subject to numerical instability.
* If `capping` is set to a positive integer, the routine is executed
with the heuristics proposed by [1]_:
* For k = `init_pivot`, `init_pivot` + 1, ..., `init_pivot` +
(m+n-2), (modulo m+n), the Lemke-Howson algorithm is executed
with k as the initial pivot and `capping` as the maximum number
of pivoting steps. If the algorithm converges during this loop,
then the Nash equilibrium found is returned.
* Otherwise, the Lemke-Howson algorithm is executed with
`init_pivot` + (m+n-1) (modulo m+n) as the initial pivot, with a
limit `max_iter` on the total number of pivoting steps.
According to the simulation results for *uniformly random games*,
for medium- to large-size games this heuristic outperforms the
basic Lemke-Howson algorithm with a fixed initial pivot, where
[1]_ suggests that `capping` be set to 10.
References
----------
.. [1] B. Codenotti, S. De Rossi, and M. Pagan, "An Experimental
Analysis of Lemke-Howson Algorithm," arXiv:0811.3247, 2008.
.. [2] C. E. Lemke and J. T. Howson, "Equilibrium Points of Bimatrix
Games," Journal of the Society for Industrial and Applied
Mathematics (1964), 413-423.
.. [3] B. von Stengel, "Equilibrium Computation for Two-Player Games
in Strategic and Extensive Form," Chapter 3, N. Nisan, T.
Roughgarden, E. Tardos, and V. Vazirani eds., Algorithmic Game
Theory, 2007.
"""
try:
N = g.N
except AttributeError:
raise TypeError('g must be a 2-player NormalFormGame')
if N != 2:
raise NotImplementedError('Implemented only for 2-player games')
payoff_matrices = g.payoff_arrays
nums_actions = g.nums_actions
total_num = sum(nums_actions)
msg = '`init_pivot` must be an integer k' + \
' such that 0 <= k < {0}'.format(total_num)
if not isinstance(init_pivot, numbers.Integral):
raise TypeError(msg)
if not (0 <= init_pivot < total_num):
raise ValueError(msg)
if capping is None:
capping = max_iter
tableaux = tuple(
np.empty((nums_actions[1-i], total_num+1)) for i in range(N)
)
bases = tuple(np.empty(nums_actions[1-i], dtype=int) for i in range(N))
converged, num_iter, init_pivot_used = \
_lemke_howson_capping(payoff_matrices, tableaux, bases, init_pivot,
max_iter, capping)
NE = _get_mixed_actions(tableaux, bases)
if not full_output:
return NE
res = NashResult(NE=NE,
converged=converged,
num_iter=num_iter,
max_iter=max_iter,
init=init_pivot_used)
return NE, res | Find one mixed-action Nash equilibrium of a 2-player normal form
game by the Lemke-Howson algorithm [2]_, implemented with
"complementary pivoting" (see, e.g., von Stengel [3]_ for details).
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
init_pivot : scalar(int), optional(default=0)
Initial pivot, an integer k such that 0 <= k < m+n, where
integers 0, ..., m-1 and m, ..., m+n-1 correspond to the actions
of players 0 and 1, respectively.
max_iter : scalar(int), optional(default=10**6)
Maximum number of pivoting steps.
capping : scalar(int), optional(default=None)
If supplied, the routine is executed with the heuristics
proposed by Codenotti et al. [1]_; see Notes below for details.
full_output : bool, optional(default=False)
If False, only the computed Nash equilibrium is returned. If
True, the return value is `(NE, res)`, where `NE` is the Nash
equilibrium and `res` is a `NashResult` object.
Returns
-------
NE : tuple(ndarray(float, ndim=1))
Tuple of computed Nash equilibrium mixed actions.
res : NashResult
Object containing information about the computation. Returned
only when `full_output` is True. See `NashResult` for details.
Examples
--------
Consider the following game from von Stengel [3]_:
>>> np.set_printoptions(precision=4) # Reduce the digits printed
>>> bimatrix = [[(3, 3), (3, 2)],
... [(2, 2), (5, 6)],
... [(0, 3), (6, 1)]]
>>> g = NormalFormGame(bimatrix)
Obtain a Nash equilibrium of this game by `lemke_howson` with player
0's action 1 (out of the three actions 0, 1, and 2) as the initial
pivot:
>>> lemke_howson(g, init_pivot=1)
(array([ 0. , 0.3333, 0.6667]), array([ 0.3333, 0.6667]))
>>> g.is_nash(_)
True
Additional information is returned if `full_output` is set True:
>>> NE, res = lemke_howson(g, init_pivot=1, full_output=True)
>>> res.converged # Whether the routine has converged
True
>>> res.num_iter # Number of pivoting steps performed
4
Notes
-----
* This routine is implemented with floating point arithmetic and
thus is subject to numerical instability.
* If `capping` is set to a positive integer, the routine is executed
with the heuristics proposed by [1]_:
* For k = `init_pivot`, `init_pivot` + 1, ..., `init_pivot` +
(m+n-2), (modulo m+n), the Lemke-Howson algorithm is executed
with k as the initial pivot and `capping` as the maximum number
of pivoting steps. If the algorithm converges during this loop,
then the Nash equilibrium found is returned.
* Otherwise, the Lemke-Howson algorithm is executed with
`init_pivot` + (m+n-1) (modulo m+n) as the initial pivot, with a
limit `max_iter` on the total number of pivoting steps.
According to the simulation results for *uniformly random games*,
for medium- to large-size games this heuristic outperforms the
basic Lemke-Howson algorithm with a fixed initial pivot, where
[1]_ suggests that `capping` be set to 10.
References
----------
.. [1] B. Codenotti, S. De Rossi, and M. Pagan, "An Experimental
Analysis of Lemke-Howson Algorithm," arXiv:0811.3247, 2008.
.. [2] C. E. Lemke and J. T. Howson, "Equilibrium Points of Bimatrix
Games," Journal of the Society for Industrial and Applied
Mathematics (1964), 413-423.
.. [3] B. von Stengel, "Equilibrium Computation for Two-Player Games
in Strategic and Extensive Form," Chapter 3, N. Nisan, T.
Roughgarden, E. Tardos, and V. Vazirani eds., Algorithmic Game
Theory, 2007. |
def jsonify(resource):
"""Return a Flask ``Response`` object containing a
JSON representation of *resource*.
:param resource: The resource to act as the basis of the response
"""
response = flask.jsonify(resource.to_dict())
response = add_link_headers(response, resource.links())
return response | Return a Flask ``Response`` object containing a
JSON representation of *resource*.
:param resource: The resource to act as the basis of the response |
def object_exists_in_project(obj_id, proj_id):
'''
:param obj_id: object ID
:type obj_id: str
:param proj_id: project ID
:type proj_id: str
Returns True if the specified data object can be found in the specified
project.
'''
if obj_id is None:
raise ValueError("Expected obj_id to be a string")
if proj_id is None:
raise ValueError("Expected proj_id to be a string")
if not is_container_id(proj_id):
raise ValueError('Expected %r to be a container ID' % (proj_id,))
return try_call(dxpy.DXHTTPRequest, '/' + obj_id + '/describe', {'project': proj_id})['project'] == proj_id | :param obj_id: object ID
:type obj_id: str
:param proj_id: project ID
:type proj_id: str
Returns True if the specified data object can be found in the specified
project. |
def score(self, X, y=None, **kwargs):
"""
Generates the Scikit-Learn classification report.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy score
"""
y_pred = self.predict(X)
scores = precision_recall_fscore_support(y, y_pred)
# Calculate the percentage for the support metric
# and store the percent in place of raw support counts
self.support_score_ = scores[-1]
scores = list(scores)
scores[-1] = scores[-1] / scores[-1].sum()
# Create a mapping composed of precision, recall, F1, and support
# to their respective values
scores = map(lambda s: dict(zip(self.classes_, s)), scores)
self.scores_ = dict(zip(SCORES_KEYS, scores))
# Remove support scores if not required
if not self.support:
self.scores_.pop('support')
self.draw()
# Retrieve and store the score attribute from the sklearn classifier
self.score_ = self.estimator.score(X, y)
return self.score_ | Generates the Scikit-Learn classification report.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy score |
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass | Run the given function in an independent python interpreter. |
def get_data_source(self):
"""The method determines data source from product ID.
:return: Data source of the product
:rtype: DataSource
:raises: ValueError
"""
product_type = self.product_id.split('_')[1]
if product_type.endswith('L1C') or product_type == 'OPER':
return DataSource.SENTINEL2_L1C
if product_type.endswith('L2A') or product_type == 'USER':
return DataSource.SENTINEL2_L2A
raise ValueError('Unknown data source of product {}'.format(self.product_id)) | The method determines data source from product ID.
:return: Data source of the product
:rtype: DataSource
:raises: ValueError |
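For illustration, the product-type token is the second underscore-separated field of the product ID; the IDs below are hypothetical but show which branch each one hits:

for product_id in ('S2A_MSIL1C_20200101T100319_N0208_R122_T33UUP_20200101T113000',
                   'S2A_MSIL2A_20200101T100319_N0208_R122_T33UUP_20200101T113000'):
    product_type = product_id.split('_')[1]
    print(product_type, product_type.endswith('L1C'), product_type.endswith('L2A'))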
def record_run(record_type, print_session_id, **kwds):
"""
Record shell history.
"""
if print_session_id and record_type != 'init':
raise RuntimeError(
'--print-session-id should be used with --record-type=init')
cfstore = ConfigStore()
# SOMEDAY: Pass a list of environment variables to shell by "rash
# init" and don't read configuration in "rash record" command. It
# is faster.
config = cfstore.get_config()
envkeys = config.record.environ[record_type]
json_path = os.path.join(cfstore.record_path,
record_type,
time.strftime('%Y-%m-%d-%H%M%S.json'))
mkdirp(os.path.dirname(json_path))
# Command line options directly map to record keys
data = dict((k, v) for (k, v) in kwds.items() if v is not None)
data.update(
environ=get_environ(envkeys),
)
# Automatically set some missing variables:
data.setdefault('cwd', getcwd())
if record_type in ['command', 'exit']:
data.setdefault('stop', int(time.time()))
elif record_type in ['init']:
data.setdefault('start', int(time.time()))
if print_session_id:
data['session_id'] = generate_session_id(data)
print(data['session_id'])
with open(json_path, 'w') as fp:
json.dump(data, fp) | Record shell history. |
def vm_ip(cls, vm_id):
"""Return the first usable ip address for this vm.
Returns a (version, ip) tuple."""
vm_info = cls.info(vm_id)
for iface in vm_info['ifaces']:
if iface['type'] == 'private':
continue
for ip in iface['ips']:
return ip['version'], ip['ip'] | Return the first usable ip address for this vm.
Returns a (version, ip) tuple. |
def save_pointings(self):
"""Print the currently defined FOVs"""
import tkFileDialog
f=tkFileDialog.asksaveasfile()
i=0
if self.pointing_format.get()=='CFHT PH':
f.write("""<?xml version = "1.0"?>
<!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd">
<ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx">
<TABLE ID="Table">
<NAME>Fixed Targets</NAME>
<TITLE>Fixed Targets for CFHT QSO</TITLE>
<!-- Definition of each field -->
<FIELD name="NAME" datatype="A" width="20">
<DESCRIPTION>Name of target</DESCRIPTION>
</FIELD>
<FIELD name="RA" ref="" datatype="A" width="11" unit=""h:m:s"">
<DESCRIPTION>Right ascension of target</DESCRIPTION>
</FIELD>
<FIELD name="DEC" ref="" datatype="A" width="11" unit=""d:m:s"">
<DESCRIPTION>Declination of target</DESCRIPTION>
</FIELD>
<FIELD name="EPOCH" datatype="F" width="6">
<DESCRIPTION>Epoch of coordinates</DESCRIPTION>
</FIELD>
<FIELD name="POINT" datatype="A" width="5">
<DESCRIPTION>Pointing name</DESCRIPTION>
</FIELD>
<!-- Data table -->
<DATA><CSV headlines="4" colsep="|"><![CDATA[
NAME |RA |DEC |EPOCH |POINT|
|hh:mm:ss.ss|+dd:mm:ss.s| | |
12345678901234567890|12345678901|12345678901|123456|12345|
--------------------|-----------|-----------|------|-----|\n""")
if self.pointing_format.get()=='Palomar':
f.write("index\n")
for pointing in self.pointings:
i=i+1
name=pointing["label"]["text"]
(sra,sdec)=str(pointing["camera"]).split()
ra=sra.split(":")
dec=sdec.split(":")
dec[0]=str(int(dec[0]))
if int(dec[0])>=0:
dec[0]='+'+dec[0]
if self.pointing_format.get()=='Palomar':
f.write( "%5d %16s %2s %2s %4s %3s %2s %4s 2000\n" % (i, name,
ra[0].zfill(2),
ra[1].zfill(2),
ra[2].zfill(2),
dec[0].zfill(3),
dec[1].zfill(2),
dec[2].zfill(2)))
elif self.pointing_format.get()=='CFHT PH':
#f.write("%f %f\n" % (pointing["camera"].ra,pointing["camera"].dec))
f.write("%-20s|%11s|%11s|%6.1f|%-5d|\n" % (name,sra,sdec,2000.0,1))
elif self.pointing_format.get()=='KPNO/CTIO':
str1 = sra.replace(":"," ")
str2 = sdec.replace(":"," ")
f.write("%16s %16s %16s 2000\n" % ( name, str1, str2) )
elif self.pointing_format.get()=='SSim':
ra = []
dec= []
for ccd in pointing["camera"].getGeometry():
ra.append(ccd[0])
ra.append(ccd[2])
dec.append(ccd[1])
dec.append(ccd[3])
import math
dra=math.degrees(math.fabs(max(ra)-min(ra)))
ddec=math.degrees(math.fabs(max(dec)-min(dec)))
f.write("%f %f %16s %16s DATE 1.00 1.00 500 FILE\n" % (dra, ddec, sra, sdec ) )
if self.pointing_format.get()=='CFHT PH':
f.write("""]]</CSV></DATA>
</TABLE>
</ASTRO>
""")
f.close() | Print the currently defined FOVs |
def autodiscover():
"""
Imports all available previews classes.
"""
from django.conf import settings
for application in settings.INSTALLED_APPS:
module = import_module(application)
if module_has_submodule(module, 'emails'):
emails = import_module('%s.emails' % application)
try:
import_module('%s.emails.previews' % application)
except ImportError:
# Only raise the exception if this module contains previews and
# there was a problem importing them. (An emails module that
# does not contain previews is not an error.)
if module_has_submodule(emails, 'previews'):
raise | Imports all available previews classes. |
def minimum_needs_section_header_element(feature, parent):
"""Retrieve minimum needs section header string from definitions."""
_ = feature, parent # NOQA
header = minimum_needs_section_header['string_format']
return header.capitalize() | Retrieve minimum needs section header string from definitions. |
def is_descendant_of_vault(self, id_, vault_id):
"""Tests if an ``Id`` is a descendant of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id) | Tests if an ``Id`` is a descendant of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``. |
def get_docker_network(self, container_id, all_stats):
"""Return the container network usage using the Docker API (v1.0 or higher).
Input: id is the full container id
Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
with:
time_since_update: number of seconds elapsed between the latest grab
rx: Number of bytes received
tx: Number of bytes transmitted
"""
# Init the returned dict
network_new = {}
# Read the rx/tx stats (in bytes)
try:
netcounters = all_stats["networks"]
except KeyError as e:
# all_stats do not have NETWORK information
logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
# No fallback available...
return network_new
# Previous network interface stats are stored in the network_old variable
if not hasattr(self, 'netcounters_old'):
# First call, we init the network_old var
self.netcounters_old = {}
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
if container_id not in self.netcounters_old:
try:
self.netcounters_old[container_id] = netcounters
except (IOError, UnboundLocalError):
pass
else:
# By storing time data we enable Rx/s and Tx/s calculations in the
# XML/RPC API, which would otherwise be overly difficult work
# for users of the API
try:
network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
except KeyError as e:
# all_stats do not have INTERFACE information
logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
logger.debug(all_stats)
# Save stats to compute next bitrate
self.netcounters_old[container_id] = netcounters
# Return the stats
return network_new | Return the container network usage using the Docker API (v1.0 or higher).
Input: id is the full container id
Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
with:
time_since_update: number of seconds elapsed between the latest grab
rx: Number of bytes received
tx: Number of bytes transmitted |
def location(self):
"""Return the location of the printer."""
try:
return self.data.get('identity').get('location')
except (KeyError, AttributeError):
return self.device_status_simple('') | Return the location of the printer. |
def _run_code(code, run_globals, init_globals=None,
mod_name=None, mod_fname=None,
mod_loader=None, pkg_name=None):
"""Helper to run code in nominated namespace"""
if init_globals is not None:
run_globals.update(init_globals)
run_globals.update(__name__ = mod_name,
__file__ = mod_fname,
__loader__ = mod_loader,
__package__ = pkg_name)
exec code in run_globals
return run_globals | Helper to run code in nominated namespace |
def open():
'''Opens MATLAB using the specified connection (or the DCOM+ protocol on Windows).'''
global _MATLAB_RELEASE
if is_win:
ret = MatlabConnection()
ret.open()
return ret
else:
if settings.MATLAB_PATH != 'guess':
matlab_path = settings.MATLAB_PATH + '/bin/matlab'
elif _MATLAB_RELEASE != 'latest':
matlab_path = discover_location(_MATLAB_RELEASE)
else:
# Latest release is found in __init__.py, i.e. at a higher logical level
raise MatlabReleaseNotFound('Please select a matlab release or set its location.')
try:
ret = MatlabConnection(matlab_path)
ret.open()
except Exception:
#traceback.print_exc(file=sys.stderr)
raise MatlabReleaseNotFound('Could not open matlab, is it in %s?' % matlab_path)
return ret | Opens MATLAB using the specified connection (or the DCOM+ protocol on Windows). |
def listar(self, id_divisao=None, id_ambiente_logico=None):
"""Lista os ambientes filtrados conforme parâmetros informados.
Se os dois parâmetros têm o valor None então retorna todos os ambientes.
Se o id_divisao é diferente de None então retorna os ambientes filtrados
pelo valor de id_divisao.
Se o id_divisao e id_ambiente_logico são diferentes de None então retorna
os ambientes filtrados por id_divisao e id_ambiente_logico.
:param id_divisao: Identificador da divisão de data center.
:param id_ambiente_logico: Identificador do ambiente lógico.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': [{'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >},
... remaining environments ... ]}
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the response XML.
"""
url = 'ambiente/'
if is_valid_int_param(id_divisao) and not is_valid_int_param(
id_ambiente_logico):
url = 'ambiente/divisao_dc/' + str(id_divisao) + '/'
elif is_valid_int_param(id_divisao) and is_valid_int_param(id_ambiente_logico):
url = 'ambiente/divisao_dc/' + \
str(id_divisao) + '/ambiente_logico/' + str(id_ambiente_logico) + '/'
code, xml = self.submit(None, 'GET', url)
key = 'ambiente'
return get_list_map(self.response(code, xml, [key]), key) | Lists the environments filtered according to the given parameters.
If both parameters are None, all environments are returned.
If id_divisao is not None, the environments filtered by the
value of id_divisao are returned.
If both id_divisao and id_ambiente_logico are not None, the
environments filtered by id_divisao and id_ambiente_logico are returned.
:param id_divisao: Identifier of the data center division.
:param id_ambiente_logico: Identifier of the logical environment.
:return: Dictionary with the following structure:
::
{'ambiente': [{'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >},
... remaining environments ... ]}
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the response XML. |
def _get_relationships(model):
"""
Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple
"""
relationships = []
for name, relationship in inspect(model).relationships.items():
class_ = relationship.mapper.class_
if relationship.uselist:
rel = ListRelationship(name, relation=class_.__name__)
else:
rel = Relationship(name, relation=class_.__name__)
relationships.append(rel)
return tuple(relationships) | Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple |
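A sketch of the SQLAlchemy side this helper inspects; the Author/Book models are hypothetical and the imports assume SQLAlchemy 1.4+:

from sqlalchemy import Column, ForeignKey, Integer, inspect
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Author(Base):
    __tablename__ = 'author'
    id = Column(Integer, primary_key=True)
    books = relationship('Book', back_populates='author')    # uselist=True -> ListRelationship

class Book(Base):
    __tablename__ = 'book'
    id = Column(Integer, primary_key=True)
    author_id = Column(Integer, ForeignKey('author.id'))
    author = relationship('Author', back_populates='books')  # uselist=False -> Relationship

for name, rel in inspect(Author).relationships.items():
    print(name, rel.uselist, rel.mapper.class_.__name__)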
def configure_callbacks(app):
""" Configure application callbacks """
@app.before_request
def before_request():
""" Retrieve menu configuration before every request (this will return
the cached version if possible, else reload it from the database). """
from flask import session
#g.menusystem = helper.generate_menusystem()
session['menusystem'] = helper.generate_menusystem()
print session['menusystem'] | Configure application callbacks |
def _read_input_csv(in_file):
"""Parse useful details from SampleSheet CSV file.
"""
with io.open(in_file, newline=None) as in_handle:
reader = csv.reader(in_handle)
next(reader) # header
for line in reader:
if line:  # skip empty lines
(fc_id, lane, sample_id, genome, barcode) = line[:5]
yield fc_id, lane, sample_id, genome, barcode | Parse useful details from SampleSheet CSV file. |
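A sketch of the row layout the parser expects; only the first five columns are used and the values below are made up:

import csv, io

sample_sheet = ("FCID,Lane,SampleID,SampleRef,Index,Description\n"
                "FC1234,1,S1,hg19,ATCACG,control sample\n")
reader = csv.reader(io.StringIO(sample_sheet))
next(reader)  # skip the header, as _read_input_csv does
for row in reader:
    if row:
        fc_id, lane, sample_id, genome, barcode = row[:5]
        print(fc_id, lane, sample_id, genome, barcode)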
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-10 - Started - Bovy (NYU)
"""
if self.alpha == 2.:
return nu.log(R**2.+z**2.)/2.
else:
return -(R**2.+z**2.)**(1.-self.alpha/2.)/(self.alpha-2.) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-07-10 - Started - Bovy (NYU) |
def check(self, check_url=None):
"""
Checks whether a server is running.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``.
"""
if check_url is not None:
self.check_url = self._normalize_check_url(check_url)
response = None
sleeped = 0.0
t = datetime.now()
while not response:
try:
response = requests.get(self.check_url, verify=False)
except requests.exceptions.ConnectionError:
if sleeped > self.timeout:
self._kill()
raise LiveAndLetDieError(
'{0} server {1} didn\'t start in specified timeout {2} '
'seconds!\ncommand: {3}'.format(
self.__class__.__name__,
self.check_url,
self.timeout,
' '.join(self.create_command())
)
)
time.sleep(1)
sleeped = _get_total_seconds(datetime.now() - t)
return _get_total_seconds(datetime.now() - t) | Checks whether a server is running.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``. |
def get_sof_term(self, C, rup):
"""
In the case of the upper mantle events separate coefficients
are considered for normal, reverse and strike-slip
"""
if rup.rake <= -45.0 and rup.rake >= -135.0:
# Normal faulting
return C["FN_UM"]
elif rup.rake > 45.0 and rup.rake < 135.0:
# Reverse faulting
return C["FRV_UM"]
else:
# No adjustment for strike-slip faulting
return 0.0 | In the case of the upper mantle events separate coefficients
are considered for normal, reverse and strike-slip |
def __grabHotkeys(self):
"""
Run during startup to grab global and specific hotkeys in all open windows
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
# Grab global hotkeys in root window
for item in c.globalHotkeys:
if item.enabled:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
# Grab hotkeys without a filter in root window
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys) | Run during startup to grab global and specific hotkeys in all open windows |
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args | Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2; |
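A hedged sketch of how the generated statement might be executed; the `queryset`, `objs`, and field names below are assumptions for illustration, not part of the source.
    from django.db import connection

    sql, sql_args = _get_upsert_sql(
        queryset, objs,
        unique_fields=['my_key'], update_fields=['int_field'],
        returning=True,
    )
    with connection.cursor() as cursor:
        cursor.execute(sql, sql_args)
        rows = cursor.fetchall()  # one row per touched record when returning is set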
def dump(bqm, fp, vartype_header=False):
"""Dump a binary quadratic model to a string in COOrdinate format."""
for triplet in _iter_triplets(bqm, vartype_header):
fp.write('%s\n' % triplet) | Dump a binary quadratic model to a string in COOrdinate format. |
def run_command(self, command, arg=None, is_eval=False, member_id=None):
"""run command on replica set
if member_id is specified command will be execute on this server
if member_id is not specified command will be execute on the primary
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
member_id - member id
return command's result
"""
logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals()))
mode = is_eval and 'eval' or 'command'
hostname = None
if isinstance(member_id, int):
hostname = self.member_id_to_host(member_id)
result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)
logger.debug("command result: {result}".format(result=result))
return result | run command on replica set
if member_id is specified, the command will be executed on this server
if member_id is not specified, the command will be executed on the primary
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
member_id - member id
return command's result |
def genesis_block_audit(genesis_block_stages, key_bundle=GENESIS_BLOCK_SIGNING_KEYS):
"""
Verify the authenticity of the stages of the genesis block, optionally with a given set of keys.
Return True if valid
Return False if not
"""
gpg2_path = find_gpg2()
if gpg2_path is None:
raise Exception('You must install gpg2 to audit the genesis block, and it must be in your PATH')
log.debug('Loading {} signing key(s)...'.format(len(key_bundle)))
res = load_signing_keys(gpg2_path, [key_bundle[kid] for kid in key_bundle])
if not res:
raise Exception('Failed to install signing keys')
log.debug('Verifying {} signing key(s)...'.format(len(key_bundle)))
res = check_gpg2_keys(gpg2_path, key_bundle.keys())
if not res:
raise Exception('Failed to verify installation of signing keys')
d = tempfile.mkdtemp(prefix='.genesis-block-audit-')
# each entry in genesis_block_stages is a genesis block with its own history
for stage_id, stage in enumerate(genesis_block_stages):
log.debug('Verify stage {}'.format(stage_id))
try:
jsonschema.validate(stage, GENESIS_BLOCK_SCHEMA)
except jsonschema.ValidationError:
shutil.rmtree(d)
log.error('Invalid genesis block -- does not match schema')
raise ValueError('Invalid genesis block')
# all history rows must be signed with a trusted key
for history_id, history_row in enumerate(stage['history']):
with open(os.path.join(d, 'sig'), 'w') as f:
f.write(history_row['signature'])
with open(os.path.join(d, 'hash'), 'w') as f:
f.write(history_row['hash'])
p = subprocess.Popen([gpg2_path, '--verify', os.path.join(d,'sig'), os.path.join(d,'hash')], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
log.error('Failed to verify stage {} history {}'.format(stage_id, history_id))
shutil.rmtree(d)
return False
gb_rows_str = json.dumps(stage['rows'], sort_keys=True, separators=(',',':')) + '\n'
gb_rows_hash = hashlib.sha256(gb_rows_str).hexdigest()
# must match final history row
if gb_rows_hash != stage['history'][-1]['hash']:
log.error('Genesis block stage {} hash mismatch: {} != {}'.format(stage_id, gb_rows_hash, stage['history'][-1]['hash']))
shutil.rmtree(d)
return False
shutil.rmtree(d)
log.info('Genesis block is legitimate')
return True | Verify the authenticity of the stages of the genesis block, optionally with a given set of keys.
Return True if valid
Return False if not |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
return -R/(R**2.+z**2.)**(self.alpha/2.) | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-10 - Written - Bovy (NYU) |
def create_row_to_some_id_col_mapping(id_array):
"""
Parameters
----------
id_array : 1D ndarray.
All elements of the array should be ints representing some id related
to the corresponding row.
Returns
-------
rows_to_ids : 2D scipy sparse array.
Will map each row of id_array to the unique values of `id_array`. The
columns of the returned sparse array will correspond to the unique
values of `id_array`, in the order of appearance for each of these
unique values.
"""
# Get the unique ids, in their original order of appearance
original_order_unique_ids = get_original_order_unique_ids(id_array)
# Create a matrix with the same number of rows as id_array but a single
# column for each of the unique IDs. This matrix will associate each row
# as belonging to a particular observation using a one and using a zero to
# show non-association.
rows_to_ids = (id_array[:, None] ==
original_order_unique_ids[None, :]).astype(int)
return rows_to_ids | Parameters
----------
id_array : 1D ndarray.
All elements of the array should be ints representing some id related
to the corresponding row.
Returns
-------
rows_to_ids : 2D scipy sparse array.
Will map each row of id_array to the unique values of `id_array`. The
columns of the returned sparse array will correspond to the unique
values of `id_array`, in the order of appearance for each of these
unique values. |
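A small illustration of the mapping (the id values are made up); columns follow the order of first appearance of each unique id.
    import numpy as np

    id_array = np.array([7, 7, 3, 3, 3])
    rows_to_ids = create_row_to_some_id_col_mapping(id_array)
    # columns correspond to ids 7 and 3, in that order:
    # [[1 0]
    #  [1 0]
    #  [0 1]
    #  [0 1]
    #  [0 1]]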
def on_change_checkout(self):
'''
When you change checkout or checkin update dummy field
-----------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation
'''
checkout_date = time.strftime(dt)
checkin_date = time.strftime(dt)
if not (checkout_date and checkin_date):
return {'value': {}}
delta = timedelta(days=1)
dat_a = time.strptime(checkout_date, dt)[:5]
addDays = datetime(*dat_a) + delta
self.dummy = addDays.strftime(dt) | When you change checkout or checkin update dummy field
-----------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation |
def create_server(self, *args, **kwargs):
"""
Wraps :meth:`bang.providers.openstack.Nova.create_server` to apply
hpcloud specialization, namely pulling IP addresses from the hpcloud's
non-standard return values.
"""
# hpcloud's management console stuffs all of its tags in a "tags" tag.
# populate it with the stack and role values here only at server
# creation time. what users do with it after server creation is up to
# them.
tags = kwargs['tags']
tags[A.tags.TAGS] = ','.join([
tags.get(A.tags.STACK, ''),
tags.get(A.tags.ROLE, ''),
])
# Don't create an explicit floating IP; gets one
# automatically
if 'floating_ip' not in kwargs:
kwargs['floating_ip'] = False
s = super(HPNova, self).create_server(*args, **kwargs)
return fix_hp_addrs(s) | Wraps :meth:`bang.providers.openstack.Nova.create_server` to apply
hpcloud specialization, namely pulling IP addresses from the hpcloud's
non-standard return values. |
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
""" word embedding + LSTM Projected """
state_names = []
data = S.var('data')
weight = S.var("encoder_weight", stype='row_sparse')
embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size,
output_dim=num_embed, name='embed', sparse_grad=True)
states = []
outputs = S.Dropout(embed, p=dropout)
for i in range(num_layers):
prefix = 'lstmp%d_' % i
init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero())
init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero())
state_names += [prefix + 'init_h', prefix + 'init_c']
lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix)
outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \
layout='NTC', merge_outputs=True)
outputs = S.Dropout(outputs, p=dropout)
states += [S.stop_gradient(s) for s in next_states]
outputs = S.reshape(outputs, shape=(-1, num_proj))
trainable_lstm_args = []
for arg in outputs.list_arguments():
if 'lstmp' in arg and 'init' not in arg:
trainable_lstm_args.append(arg)
return outputs, states, trainable_lstm_args, state_names | word embedding + LSTM Projected |
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):
"""Reverse op of _tf_flatten_batch_dims.
Un-flatten the first dimension of x to match all but the last
num_nonbatch_dims dimensions of prototype.
Args:
x: a tf.Tensor with 1 + num_nonbatch_dims dimensions
num_nonbatch_dims: an integer
prototype: a tf.Tensor
Returns:
a tf.Tensor
"""
assert x.shape.ndims == 1 + num_nonbatch_dims
new_shape = (
prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:])
assert None not in new_shape
if new_shape != x.shape.as_list():
x = tf.reshape(x, new_shape)
return x | Reverse op of _tf_flatten_batch_dims.
Un-flatten the first dimension of x to match all but the last
num_nonbatch_dims dimensions of prototype.
Args:
x: a tf.Tensor with 1 + num_nonbatch_dims dimensions
num_nonbatch_dims: an integer
prototype: a tf.Tensor
Returns:
a tf.Tensor |
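A shape-only sketch under assumed dimensions (not from the source): with two non-batch dims, a prototype of shape [2, 3, 4, 5] restores a flattened [6, 4, 5] tensor back to [2, 3, 4, 5].
    import tensorflow as tf

    prototype = tf.zeros([2, 3, 4, 5])
    x = tf.zeros([6, 4, 5])                      # 6 == 2 * 3 flattened batch dims
    y = _tf_restore_batch_dims(x, 2, prototype)  # y has shape [2, 3, 4, 5]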
def stop_subscribe(self):
""" This function is used to stop the event loop created when subscribe is called. But this function doesn't
stop the thread and should be avoided until it's completely developed.
"""
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
self.event_loop.stop()
self.event_loop.close() | This function is used to stop the event loop created when subscribe is called. But this function doesn't
stop the thread and should be avoided until it's completely developed.
def add_resource(
self,
base_rule,
base_view,
alternate_view=None,
alternate_rule=None,
id_rule=None,
app=None,
):
"""Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present.
"""
if alternate_view:
if not alternate_rule:
id_rule = id_rule or DEFAULT_ID_RULE
alternate_rule = posixpath.join(base_rule, id_rule)
else:
assert id_rule is None
else:
assert alternate_rule is None
assert id_rule is None
app = self._get_app(app)
endpoint = self._get_endpoint(base_view, alternate_view)
# Store the view rules for reference. Doesn't support multiple routes
# mapped to same view.
views = app.extensions['resty'].views
base_rule_full = '{}{}'.format(self.prefix, base_rule)
base_view_func = base_view.as_view(endpoint)
if not alternate_view:
app.add_url_rule(base_rule_full, view_func=base_view_func)
views[base_view] = Resource(base_view, base_rule_full)
return
alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule)
alternate_view_func = alternate_view.as_view(endpoint)
@functools.wraps(base_view_func)
def view_func(*args, **kwargs):
if flask.request.url_rule.rule == base_rule_full:
return base_view_func(*args, **kwargs)
else:
return alternate_view_func(*args, **kwargs)
app.add_url_rule(
base_rule_full, view_func=view_func, endpoint=endpoint,
methods=base_view.methods,
)
app.add_url_rule(
alternate_rule_full, view_func=view_func, endpoint=endpoint,
methods=alternate_view.methods,
)
views[base_view] = Resource(base_view, base_rule_full)
views[alternate_view] = Resource(alternate_view, alternate_rule_full) | Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present. |
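A hedged usage sketch; `api`, WidgetListView, WidgetView, and the '/widgets' rule are invented names, not from the source.
    api.add_resource('/widgets', WidgetListView, WidgetView)
    # registers <prefix>/widgets for the list view and <prefix>/widgets/<id>
    # for the detail view, both dispatched through a single endpoint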
def search_commits(self, query, sort=github.GithubObject.NotSet, order=github.GithubObject.NotSet, **qualifiers):
"""
:calls: `GET /search/commits <http://developer.github.com/v3/search>`_
:param query: string
:param sort: string ('author-date', 'committer-date')
:param order: string ('asc', 'desc')
:param qualifiers: keyword dict query qualifiers
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
assert isinstance(query, (str, unicode)), query
url_parameters = dict()
if sort is not github.GithubObject.NotSet: # pragma no branch (Should be covered)
assert sort in ('author-date', 'committer-date'), sort
url_parameters["sort"] = sort
if order is not github.GithubObject.NotSet: # pragma no branch (Should be covered)
assert order in ('asc', 'desc'), order
url_parameters["order"] = order
query_chunks = []
if query: # pragma no branch (Should be covered)
query_chunks.append(query)
for qualifier, value in qualifiers.items():
query_chunks.append("%s:%s" % (qualifier, value))
url_parameters["q"] = ' '.join(query_chunks)
assert url_parameters["q"], "need at least one qualifier"
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self.__requester,
"/search/commits",
url_parameters,
headers={
"Accept": Consts.mediaTypeCommitSearchPreview
}
) | :calls: `GET /search/commits <http://developer.github.com/v3/search>`_
:param query: string
:param sort: string ('author-date', 'committer-date')
:param order: string ('asc', 'desc')
:param qualifiers: keyword dict query qualifiers
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit` |
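A hedged example; the token, query string, and repository qualifier are placeholders.
    g = github.Github("access_token")
    commits = g.search_commits("fix typo", sort="author-date", order="desc",
                               repo="octocat/Hello-World")
    for commit in commits[:5]:
        print(commit.sha)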
def clonemedium(medium,
uuid_in=None,
file_in=None,
uuid_out=None,
file_out=None,
mformat=None,
variant=None,
existing=False,
**kwargs):
'''
Clone a new medium from an existing one
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonemedium <name> <new_name>
'''
params = ''
valid_mediums = ('disk', 'dvd', 'floppy')
if medium in valid_mediums:
params += medium
else:
raise CommandExecutionError(
'Medium must be one of: {0}.'.format(', '.join(valid_mediums))
)
if (uuid_in and file_in) or (not uuid_in and not file_in):
raise CommandExecutionError(
'Either uuid_in or file_in must be used, but not both.'
)
if uuid_in:
if medium == 'disk':
item = 'hdds'
elif medium == 'dvd':
item = 'dvds'
elif medium == 'floppy':
item = 'floppies'
items = list_items(item)
if uuid_in not in items:
raise CommandExecutionError('UUID {0} was not found'.format(uuid_in))
params += ' ' + uuid_in
elif file_in:
if not os.path.exists(file_in):
raise CommandExecutionError('File {0} was not found'.format(file_in))
params += ' ' + file_in
if (uuid_out and file_out) or (not uuid_out and not file_out):
raise CommandExecutionError(
'Either uuid_out or file_out must be used, but not both.'
)
if uuid_out:
params += ' ' + uuid_out
elif file_out:
try:
salt.utils.files.fopen(file_out, 'w').close() # pylint: disable=resource-leakage
os.unlink(file_out)
params += ' ' + file_out
except OSError:
raise CommandExecutionError('{0} is not a valid filename'.format(file_out))
if mformat:
valid_mformat = ('VDI', 'VMDK', 'VHD', 'RAW')
if mformat not in valid_mformat:
raise CommandExecutionError(
'If specified, mformat must be one of: {0}'.format(', '.join(valid_mformat))
)
else:
params += ' --format ' + mformat
valid_variant = ('Standard', 'Fixed', 'Split2G', 'Stream', 'ESX')
if variant and variant not in valid_variant:
raise CommandExecutionError(
'If specified, variant must be one of: {0}'.format(', '.join(valid_variant))
)
elif variant:
params += ' --variant ' + variant
if existing:
params += ' --existing'
cmd = '{0} clonemedium {1}'.format(vboxcmd(), params)
ret = salt.modules.cmdmod.run_all(cmd)
if ret['retcode'] == 0:
return True
return ret['stderr'] | Clone a new medium from an existing one
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonemedium <name> <new_name> |
def save(self, inplace=True):
"""
Saves all modifications to the marker on the server.
:param inplace: Apply edits on the current instance or get a new one.
:return: Marker instance.
"""
modified_data = self._modified_data()
if bool(modified_data):
extra = {
'resource': self.__class__.__name__,
'query': {
'id': self.id,
'modified_data': modified_data
}
}
logger.info('Saving marker', extra=extra)
data = self._api.patch(url=self._URL['get'].format(id=self.id),
data=modified_data).json()
marker = Marker(api=self._api, **data)
return marker
else:
raise ResourceNotModified() | Saves all modifications to the marker on the server.
:param inplace: Apply edits on the current instance or get a new one.
:return: Marker instance. |
def _to_dict(self, node):
"""convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node
"""
if node == BLANK_NODE:
return {}
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
nibbles = without_terminator(unpack_to_nibbles(node[0]))
key = b'+'.join([to_string(x) for x in nibbles])
if node_type == NODE_TYPE_EXTENSION:
sub_dict = self._to_dict(self._decode_to_node(node[1]))
else:
sub_dict = {to_string(NIBBLE_TERMINATOR): node[1]}
# prepend key of this node to the keys of children
res = {}
for sub_key, sub_value in sub_dict.items():
full_key = (key + b'+' + sub_key).strip(b'+')
res[full_key] = sub_value
return res
elif node_type == NODE_TYPE_BRANCH:
res = {}
for i in range(16):
sub_dict = self._to_dict(self._decode_to_node(node[i]))
for sub_key, sub_value in sub_dict.items():
full_key = (
str_to_bytes(
str(i)) +
b'+' +
sub_key).strip(b'+')
res[full_key] = sub_value
if node[16]:
res[to_string(NIBBLE_TERMINATOR)] = node[-1]
return res | convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node |
def get_hkr_state(self):
"""Get the thermostate state."""
self.update()
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual' | Get the thermostate state. |
def drawDisplay( self, painter, option, rect, text ):
"""
Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str>
"""
painter.setBrush(Qt.NoBrush)
painter.drawText(rect.left() + 3,
rect.top(),
rect.width() - 3,
rect.height(),
option.displayAlignment,
text) | Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str> |
async def _notify_update(self, name, change_type, change_info=None, directed_client=None):
"""Notify updates on a service to anyone who cares."""
for monitor in self._monitors:
try:
result = monitor(name, change_type, change_info, directed_client=directed_client)
if inspect.isawaitable(result):
await result
except Exception:
# We can't allow any exceptions in a monitor routine to break the server.
self._logger.warning("Error calling monitor with update %s", name, exc_info=True) | Notify updates on a service to anyone who cares. |
def gzip_dir(path, compresslevel=6):
"""
Gzips all files in a directory. Note that this is different from
shutil.make_archive, which creates a tar archive. The aim of this method
is to create gzipped files that can still be read using common Unix-style
commands like zless or zcat.
Args:
path (str): Path to directory.
compresslevel (int): Level of compression, 1-9. 9 is default for
GzipFile, 6 is default for gzip.
"""
for f in os.listdir(path):
full_f = os.path.join(path, f)
if not f.lower().endswith("gz"):
with open(full_f, 'rb') as f_in, \
GzipFile('{}.gz'.format(full_f), 'wb',
compresslevel=compresslevel) as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copystat(full_f,'{}.gz'.format(full_f))
os.remove(full_f) | Gzips all files in a directory. Note that this is different from
shutil.make_archive, which creates a tar archive. The aim of this method
is to create gzipped files that can still be read using common Unix-style
commands like zless or zcat.
Args:
path (str): Path to directory.
compresslevel (int): Level of compression, 1-9. 9 is default for
GzipFile, 6 is default for gzip. |
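A minimal usage sketch; the path is an assumption.
    gzip_dir("/var/log/myapp", compresslevel=9)
    # every file in the directory not already ending in .gz is replaced by a .gz copy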
def get_instance(self, payload):
"""
Build an instance of FaxMediaInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance
:rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance
"""
return FaxMediaInstance(self._version, payload, fax_sid=self._solution['fax_sid'], ) | Build an instance of FaxMediaInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance
:rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaInstance |
def __analizar_evento(self, ret):
"Comprueba y extrae el wvento informativo si existen en la respuesta XML"
evt = ret.get('evento')
if evt:
self.Eventos = [evt]
self.Evento = "%(codigo)s: %(descripcion)s" % evt | Comprueba y extrae el wvento informativo si existen en la respuesta XML |
def _parse_saved_model(path):
"""Reads the savedmodel.pb file containing `SavedModel`."""
# Based on tensorflow/python/saved_model/loader.py implementation.
path_to_pb = _get_saved_model_proto_path(path)
file_content = tf_v1.gfile.Open(path_to_pb, "rb").read()
saved_model = saved_model_pb2.SavedModel()
try:
saved_model.ParseFromString(file_content)
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
return saved_model | Reads the savedmodel.pb file containing `SavedModel`. |
def check_user(self, todays_facts):
"""check if we need to notify user perhaps"""
interval = self.conf_notify_interval
if interval <= 0 or interval >= 121:
return
now = dt.datetime.now()
message = None
last_activity = todays_facts[-1] if todays_facts else None
# update duration of current task
if last_activity and not last_activity['end_time']:
delta = now - last_activity['start_time']
duration = delta.seconds / 60
if duration and duration % interval == 0:
message = _("Working on %s") % last_activity['name']
self.notify_user(message)
elif self.conf_notify_on_idle:
#if we have no last activity, let's just calculate duration from 00:00
if (now.minute + now.hour * 60) % interval == 0:
self.notify_user(_("No activity")) | check if we need to notify user perhaps |
def get_notifications(self, **params):
"""https://developers.coinbase.com/api/v2#list-notifications"""
response = self._get('v2', 'notifications', params=params)
return self._make_api_object(response, Notification) | https://developers.coinbase.com/api/v2#list-notifications |
def get_queryset(self):
"""
Returns queryset instance.
:rtype: django.db.models.query.QuerySet.
"""
queryset = super(IndexView, self).get_queryset()
search_form = self.get_search_form()
if search_form.is_valid():
query_str = search_form.cleaned_data.get('q', '').strip()
queryset = self.model.objects.search(query_str)
return queryset | Returns queryset instance.
:rtype: django.db.models.query.QuerySet. |
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
depth : array_like
Station depth levels
w1 : float
Weight of the along-distance extrapolation [0-1]
w2 : float
Weight of the along-depth extrapolation [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data | Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolated via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
depth : array_like
Station depth levels
w1 : float
Weight of the along-distance extrapolation [0-1]
w2 : float
Weight of the along-depth extrapolation [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable |
def get_keys_from_ldap(self, username=None):
"""
Fetch keys from ldap.
Args:
username Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format
"""
result_dict = {}
filter = ['(sshPublicKey=*)']
if username is not None:
filter.append('(uid={})'.format(username))
attributes = ['uid', 'sshPublicKey']
results = self.client.search(filter, attributes)
for result in results:
result_dict[result.uid.value] = result.sshPublicKey.values
return result_dict | Fetch keys from ldap.
Args:
username Username associated with keys to fetch (optional)
Returns:
Array of dictionaries in '{username: [public keys]}' format |
def pack(fmt, *args, **kwargs):
"""pack(fmt, v1, v2, ..., endian=None, target=None)
Return a string containing the values v1, v2, ... packed according to the
given format. The actual packing is performed by ``struct.pack`` but the
byte order will be set according to the given `endian`, `target` or
byte order of the global target.
Args:
fmt(str): The format string.
v1,v2,...: The values to pack.
endian(:class:`~pwnypack.target.Target.Endian`): Override the default
byte order. If ``None``, it will look at the byte order of
the ``target`` argument.
target(:class:`~pwnypack.target.Target`): Override the default byte
order. If ``None``, it will look at the byte order of
the global :data:`~pwnypack.target.target`.
Returns:
bytes: The provided values packed according to the format.
"""
endian, target = kwargs.get('endian'), kwargs.get('target')
endian = endian if endian is not None else target.endian if target is not None else pwnypack.target.target.endian
if fmt and fmt[0] not in '@=<>!':
if endian is pwnypack.target.Target.Endian.little:
fmt = '<' + fmt
elif endian is pwnypack.target.Target.Endian.big:
fmt = '>' + fmt
else:
raise NotImplementedError('Unsupported endianness: %s' % endian)
return struct.pack(fmt, *args) | pack(fmt, v1, v2, ..., endian=None, target=None)
Return a string containing the values v1, v2, ... packed according to the
given format. The actual packing is performed by ``struct.pack`` but the
byte order will be set according to the given `endian`, `target` or
byte order of the global target.
Args:
fmt(str): The format string.
v1,v2,...: The values to pack.
endian(:class:`~pwnypack.target.Target.Endian`): Override the default
byte order. If ``None``, it will look at the byte order of
the ``target`` argument.
target(:class:`~pwnypack.target.Target`): Override the default byte
order. If ``None``, it will look at the byte order of
the global :data:`~pwnypack.target.target`.
Returns:
bytes: The provided values packed according to the format. |
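A hedged example of the endian override (values chosen for illustration):
    import pwnypack.target

    pack('I', 0xdeadbeef, endian=pwnypack.target.Target.Endian.little)  # b'\xef\xbe\xad\xde'
    pack('I', 0xdeadbeef, endian=pwnypack.target.Target.Endian.big)     # b'\xde\xad\xbe\xef'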
def set_fingerprint(fullpath, fingerprint=None):
""" Set the last known modification time for a file """
try:
fingerprint = fingerprint or utils.file_fingerprint(fullpath)
record = model.FileFingerprint.get(file_path=fullpath)
if record:
record.set(fingerprint=fingerprint,
file_mtime=os.stat(fullpath).st_mtime)
else:
record = model.FileFingerprint(
file_path=fullpath,
fingerprint=fingerprint,
file_mtime=os.stat(fullpath).st_mtime)
orm.commit()
except FileNotFoundError:
orm.delete(fp for fp in model.FileFingerprint if fp.file_path == fullpath) | Set the last known modification time for a file |
def transform(self, vector):
"""
Applies transformation on a vector or an RDD[Vector].
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be transformed.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector) | Applies transformation on a vector or an RDD[Vector].
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be transformed. |
def get(self, timeout=None, block=True):
"""
Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
self, timeout, block)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
i = len(self._sleeping)
if len(self._queue) > i:
_vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
return self._queue.pop(i)
if not block:
raise TimeoutError()
rsock, wsock = self._get_socketpair()
cookie = self._make_cookie()
self._sleeping.append((wsock, cookie))
finally:
self._lock.release()
poller = self.poller_class()
poller.start_receive(rsock.fileno())
try:
return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
finally:
poller.close() | Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object. |
def Update(self, attribute=None):
"""Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes: CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow.
"""
# client id is the first path element
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
# Get the pathspec for this object
flow_id = flow.StartAFF4Flow(
client_id=client_id,
# Dependency loop: aff4_objects/aff4_grr.py depends on
# aff4_objects/standard.py that depends on flows/general/filesystem.py
# that eventually depends on aff4_objects/aff4_grr.py
# flow_name=filesystem.ListDirectory.__name__,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
notify_to_user=False,
token=self.token)
return flow_id | Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes: CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow. |
def uniform_spacings(N):
""" Generate ordered uniform variates in O(N) time.
Parameters
----------
N: int (>0)
the expected number of uniform variates
Returns
-------
(N,) float ndarray
the N ordered variates (ascending order)
Note
----
This is equivalent to::
from numpy import random
u = sort(random.rand(N))
but the line above has complexity O(N*log(N)), whereas the algorithm
used here has complexity O(N).
"""
z = np.cumsum(-np.log(random.rand(N + 1)))
return z[:-1] / z[-1] | Generate ordered uniform variates in O(N) time.
Parameters
----------
N: int (>0)
the expected number of uniform variates
Returns
-------
(N,) float ndarray
the N ordered variates (ascending order)
Note
----
This is equivalent to::
from numpy import random
u = sort(random.rand(N))
but the line above has complexity O(N*log(N)), whereas the algorithm
used here has complexity O(N). |
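A quick sanity check of the properties claimed above (a sketch, not part of the module):
    import numpy as np

    u = uniform_spacings(1000)
    assert u.shape == (1000,)
    assert np.all(np.diff(u) >= 0)        # already sorted
    assert 0.0 < u[0] and u[-1] < 1.0     # inside the unit interval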
def running_apps(device_id):
""" Get running apps via HTTP GET. """
if not is_valid_device_id(device_id):
abort(403)
if device_id not in devices:
abort(404)
return jsonify(running_apps=devices[device_id].running_apps) | Get running apps via HTTP GET. |
def f_remove_child(self, name, recursive=False, predicate=None):
"""Removes a child of the group.
Note that groups and leaves are only removed from the current trajectory in RAM.
If the trajectory is stored to disk, this data is not affected. Thus, removing children
can be only be used to free RAM memory!
If you want to free memory on disk via your storage service,
use :func:`~pypet.trajectory.Trajectory.f_delete_items` of your trajectory.
:param name:
Name of child, naming by grouping is NOT allowed ('groupA.groupB.childC'),
child must be direct successor of current node.
:param recursive:
Must be true if child is a group that has children. Will remove
the whole subtree in this case. Otherwise a Type Error is thrown.
:param predicate:
Predicate which can evaluate for each node to ``True`` in order to remove the node or
``False`` if the node should be kept. Leave ``None`` if you want to remove all nodes.
:raises:
TypeError if recursive is false but there are children below the node.
ValueError if child does not exist.
"""
if name not in self._children:
raise ValueError('Your group `%s` does not contain the child `%s`.' %
(self.v_full_name, name))
else:
child = self._children[name]
if (name not in self._links and
not child.v_is_leaf and
child.f_has_children() and
not recursive):
raise TypeError('Cannot remove child. It is a group with children. Use'
' f_remove with ``recursive = True``')
else:
self._nn_interface._remove_subtree(self, name, predicate) | Removes a child of the group.
Note that groups and leaves are only removed from the current trajectory in RAM.
If the trajectory is stored to disk, this data is not affected. Thus, removing children
can be only be used to free RAM memory!
If you want to free memory on disk via your storage service,
use :func:`~pypet.trajectory.Trajectory.f_delete_items` of your trajectory.
:param name:
Name of child, naming by grouping is NOT allowed ('groupA.groupB.childC'),
child must be direct successor of current node.
:param recursive:
Must be true if child is a group that has children. Will remove
the whole subtree in this case. Otherwise a Type Error is thrown.
:param predicate:
Predicate which can evaluate for each node to ``True`` in order to remove the node or
``False`` if the node should be kept. Leave ``None`` if you want to remove all nodes.
:raises:
TypeError if recursive is false but there are children below the node.
ValueError if child does not exist. |
def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
'''Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided.
'''
if y is None:
if audio_f is None:
raise ParameterError('At least one of `y` or `audio_f` '
'must be provided')
# Load the audio
y, sr = librosa.load(audio_f, sr=sr, mono=True)
if sr is None:
raise ParameterError('If audio is provided as `y`, you must '
'specify the sampling rate as sr=')
if jam is None:
jam = jams.JAMS()
jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)
# Load the jams
if not isinstance(jam, jams.JAMS):
jam = jams.load(jam)
data = dict()
for operator in self.ops:
if isinstance(operator, BaseTaskTransformer):
data.update(operator.transform(jam))
elif isinstance(operator, FeatureExtractor):
data.update(operator.transform(y, sr))
if crop:
data = self.crop(data)
return data | Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided. |
def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage` ,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage)
"""
try:
results = self._infer_augassign(context=context)
return [
result
for result in results
if isinstance(result, util.BadBinaryOperationMessage)
]
except exceptions.InferenceError:
return [] | Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage` ,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage) |
def __get_ml_configuration_status(self, job_id):
"""
After invoking the create_ml_configuration async method, you can use this method to
check on the status of the builder job.
:param job_id: The identifier returned from create_ml_configuration
:return: Job status
"""
failure_message = "Get status on ml configuration failed"
response = self._get_success_json(self._get(
'v1/descriptors/builders/simple/default/' + job_id + '/status', None, failure_message=failure_message))[
'data']
return response | After invoking the create_ml_configuration async method, you can use this method to
check on the status of the builder job.
:param job_id: The identifier returned from create_ml_configuration
:return: Job status |
def add(self, pattern):
"""Decorator to add new dispatch functions."""
def wrap(f):
self.functions.append((f, pattern))
return f
return wrap | Decorator to add new dispatch functions. |
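A hedged sketch, assuming `dispatcher` is an instance of the class that owns `add`:
    @dispatcher.add(r'^/help\b')
    def show_help(message):
        return "available commands: ..."
    # (show_help, r'^/help\b') is now appended to dispatcher.functions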
def pool_delete(storage_pool, logger):
"""Storage Pool deletion, removes all the created disk images within the pool and the pool itself."""
path = etree.fromstring(storage_pool.XMLDesc(0)).find('.//path').text
volumes_delete(storage_pool, logger)
try:
storage_pool.destroy()
except libvirt.libvirtError:
logger.exception("Unable to delete storage pool.")
try:
if os.path.exists(path):
shutil.rmtree(path)
except EnvironmentError:
logger.exception("Unable to delete storage pool folder.") | Storage Pool deletion, removes all the created disk images within the pool and the pool itself. |
def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
"""Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param fip_statuses: dict with floatingip_id as key and status as value
"""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
# Find all floating IPs known to have been the given router
# for which an update was not received. Set them DOWN mercilessly
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self._l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN) | Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param fip_statuses: dict with floatingip_id as key and status as value
def _get_centered_z1pt0(self, sites):
"""
Get z1pt0 centered on the Vs30-dependent average z1pt0 (m) for the
California and non-Japan regions
"""
#: California and non-Japan regions
mean_z1pt0 = (-7.15 / 4.) * np.log(((sites.vs30) ** 4. + 570.94 ** 4.)
/ (1360 ** 4. + 570.94 ** 4.))
centered_z1pt0 = sites.z1pt0 - np.exp(mean_z1pt0)
return centered_z1pt0 | Get z1pt0 centered on the Vs30-dependent average z1pt0 (m) for the
California and non-Japan regions |
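Restating the expression implemented above in equation form (z1pt0 in metres, Vs30 in m/s):
    \ln \bar{z}_{1.0} = -\frac{7.15}{4}\,
        \ln\!\left(\frac{V_{S30}^{4} + 570.94^{4}}{1360^{4} + 570.94^{4}}\right),
    \qquad
    \delta z_{1.0} = z_{1.0} - \exp\!\left(\ln \bar{z}_{1.0}\right)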
def finalize_canonical_averages(
number_of_nodes, ps, canonical_averages, alpha,
):
"""
Finalize canonical averages
"""
spanning_cluster = (
(
'percolation_probability_mean' in
canonical_averages.dtype.names
) and
'percolation_probability_m2' in canonical_averages.dtype.names
)
# append values of p as an additional field
ret = np.empty_like(
canonical_averages,
dtype=finalized_canonical_averages_dtype(
spanning_cluster=spanning_cluster
),
)
n = canonical_averages['number_of_runs']
sqrt_n = np.sqrt(canonical_averages['number_of_runs'])
ret['number_of_runs'] = n
ret['p'] = ps
ret['alpha'] = alpha
def _transform(
original_key, final_key=None, normalize=False, transpose=False,
):
if final_key is None:
final_key = original_key
keys_mean = [
'{}_mean'.format(key)
for key in [original_key, final_key]
]
keys_std = [
'{}_m2'.format(original_key),
'{}_std'.format(final_key),
]
key_ci = '{}_ci'.format(final_key)
# calculate sample mean
ret[keys_mean[1]] = canonical_averages[keys_mean[0]]
if normalize:
ret[keys_mean[1]] /= number_of_nodes
# calculate sample standard deviation
array = canonical_averages[keys_std[0]]
result = np.sqrt(
(array.T if transpose else array) / (n - 1)
)
ret[keys_std[1]] = (
result.T if transpose else result
)
if normalize:
ret[keys_std[1]] /= number_of_nodes
# calculate standard normal confidence interval
array = ret[keys_std[1]]
scale = (array.T if transpose else array) / sqrt_n
array = ret[keys_mean[1]]
mean = (array.T if transpose else array)
result = scipy.stats.t.interval(
1 - alpha,
df=n - 1,
loc=mean,
scale=scale,
)
(
ret[key_ci][..., 0], ret[key_ci][..., 1]
) = ([my_array.T for my_array in result] if transpose else result)
if spanning_cluster:
_transform('percolation_probability')
_transform('max_cluster_size', 'percolation_strength', normalize=True)
_transform('moments', normalize=True, transpose=True)
return ret | Finalize canonical averages |
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in extras), verb | Create an error message for extra items or properties. |
def check_token_payment(name, token_price, stacks_payment_info):
"""
Check a token payment was enough and was of the right type
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not
"""
token_units = stacks_payment_info['token_units']
tokens_paid = stacks_payment_info['tokens_paid']
tokens_paid = int(tokens_paid)
# did the preorder/renewer pay the *right* tokens?
if token_units != TOKEN_TYPE_STACKS:
log.warning('Account paid in {}, but this namespace only accepts {}'.format(token_units, TOKEN_TYPE_STACKS))
return {'status': False}
# did we pay enough?
if tokens_paid < token_price:
# not enough!
log.warning("Name buyer paid {} {}s, but '{}' costs {} units of {}s".format(tokens_paid, token_units, name, token_price, token_units))
return {'status': False}
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units} | Check a token payment was enough and was of the right type
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not |
def get(self, names, country_id=None, language_id=None, retheader=False):
"""
Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional
:type retheader: Optional[boolean]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code.
"""
responses = [
self._get_chunk(name_chunk, country_id, language_id)
for name_chunk
in _chunked(names, Genderize.BATCH_SIZE)
]
data = list(chain.from_iterable(
response.data for response in responses
))
if retheader:
return {
"data": data,
"headers": responses[-1].headers,
}
else:
return data | Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional
:type retheader: Optional[boolean]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code. |
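A hedged usage sketch; the names and country code are placeholders, and the returned values will vary.
    g = Genderize()
    print(g.get(['Ada', 'Grace'], country_id='US'))
    # [{'name': 'Ada', 'gender': 'female', 'probability': ..., 'count': ...}, ...]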
def presnyields(self, *cycles, **keyw):
"""
This function calculates the presupernova yields of a full
structure profile from a remnant mass, mrem, to the surface.
Parameters
----------
cycles : variadic tuple
cycle[0] is the cycle to perform the presupernova yields
calculations on. If cycle[1] is also specified, the yields
are outputted using 'initial' abundances from cycle[1],
otherwise the ejected masses are outputted.
keyw : dict
A dict of key word arguments.
Notes
-----
The following keywords can be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| xm | "mass" |
+------------------+---------------+
| mrem | 0 |
+------------------+---------------+
abund and xm are used when the variables within the input file
differ in their names. The default values are set to the
output typically found in an MPPNP output file. For example,
if the table for the abundances is called "abundances" instead
of the default value, use abund = "abundances" as a keyword
argument.
mrem is specified using a keyword argument and tells the program
where to begin integrating.
"""
abund_list = []
xm_list = []
if ("xm" in keyw) == False:
keyw["xm"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("mrem" in keyw) == False:
mrem = 0.
else:
mrem = keyw["mrem"]
# Only two cycles are required in this program.
# Any more will be ignored
cylen = len(cycles)
if cylen > 2:
cylen = 2
for i in range(cylen):
cycle = cycles[i]
abund_list.append(self.se.get(cycle, keyw['abund']))
xm_list.append(self.se.get(cycle, keyw['xm']))
isoX = abund_list[0]
xm = xm_list[0]
if cylen == 2:
isoXini = abund_list[1] # initial abundances
niso = len(isoX[0,:])
nshells = len(xm)
X_i = np.zeros([niso], float)
ybound = np.zeros([niso], float)
xarray = np.zeros([nshells+1], float)
yarray = np.zeros([nshells+1], float)
# This part determines the index of the mass coordinate array which
# is closest to the specified remnant mass. This is used in the next
# part, which is used to interpolate the abundances at the remnant mass
for k in range(nshells):
k1 = k
if mrem<=xm[k]:
break
# This part is the interpolation of the isotopes found at the remnant
# mass.
for i in range(niso):
if k1>=1:
if isoX[k1-1,i]!=0.0:
m=old_div((isoX[k1,i]-isoX[k1-1,i]),(xm[k1]-xm[k1-1]))
ybound[i] = isoX[k1-1,i] +m*(mrem-xm[k1-1])
else:
ybound[i]=1.e-99
if k1==0:
if isoX[k1,i]!=0.0:
ybound[i]=isoX[k1,i]
else:
ybound[i]=1.e-99
# This part merges the interpolated data and the existing arrays into
# the arrays xarray and yarray. Once this is done, the summation is
# made.
xarray[0] = mrem
xarray[1:nshells-k1+1] = xm[k1:nshells]
for i in range(niso):
yarray[0] = ybound[i]
for j in range(nshells-k1):
if isoX[k1+j,i] != 0.0:
yarray[j+1] = isoX[k1+j,i]
else:
yarray[j+1] = 1.e-99
if cylen == 1:
# Calculate the ejected masses
for j in range(nshells-k1):
X_i[i] = X_i[i] + ((0.5*(yarray[j+1] + yarray[j])) * \
(xarray[j+1] - xarray[j]))
elif cylen == 2:
# Calculate the SN yield.
for j in range(nshells-k1):
X_i[i] = X_i[i] + ((0.5*(yarray[j+1] + yarray[j]) - isoXini[-1,i]) * \
(xarray[j+1] - xarray[j]))
return X_i | This function calculates the presupernova yields of a full
structure profile from a remnant mass, mrem, to the surface.
Parameters
----------
cycles : variadic tuple
cycle[0] is the cycle to perform the presupernova yields
calculations on. If cycle[1] is also specified, the yields
are outputted using 'initial' abundances from cycle[1],
otherwise the ejected masses are outputted.
keyw : dict
A dict of key word arguments.
Notes
-----
The following keywords can be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| xm | "mass" |
+------------------+---------------+
| mrem | 0 |
+------------------+---------------+
abund and xm are used when the variables within the input file
differ in their names. The default values are set to the
output typically found in an MPPNP output file. For example,
if the table for the abundances is called "abundances" instead
of the default value, use abund = "abundances" as a keyword
argument.
mrem is specified using a keyword argument and tells the program
where to begin integrating. |
def add_instance(model, _commit=True, **kwargs):
"""Add instance to database.
:param model: a string, model name in rio.models
:param _commit: control whether commit data to database or not. Default True.
:param \*\*kwargs: persisted data.
:return: instance id.
"""
try:
model = get_model(model)
except ImportError:
return None
instance = model(**kwargs)
db.session.add(instance)
try:
if _commit:
db.session.commit()
else:
db.session.flush()
return instance.id
except IntegrityError:
db.session.rollback()
return | Add instance to database.
:param model: a string, model name in rio.models
:param _commit: control whether commit data to database or not. Default True.
:param \*\*kwargs: persisted data.
:return: instance id. |
def addSuffixToExtensions(toc):
"""
Returns a new TOC with proper library suffix for EXTENSION items.
"""
new_toc = TOC()
for inm, fnm, typ in toc:
if typ in ('EXTENSION', 'DEPENDENCY'):
binext = os.path.splitext(fnm)[1]
if not os.path.splitext(inm)[1] == binext:
inm = inm + binext
new_toc.append((inm, fnm, typ))
return new_toc | Returns a new TOC with proper library suffix for EXTENSION items. |
def add(self, command_template, job_class):
""" Given a command template, add it as a job to the queue. """
job = JobTemplate(command_template.alias,
command_template=command_template,
depends_on=command_template.depends_on, queue=self.queue,
job_class=job_class)
self.queue.push(job) | Given a command template, add it as a job to the queue. |