code | docstring |
---|---|
def logs(self, follow=False):
"""
Get logs from this container. The iterator yields one log line followed by a newline in the next item.
The logs are NOT encoded (they are str, not bytes).
Let's look at an example::
image = conu.PodmanImage("fedora", tag="27")
command = ["bash", "-c", "for x in `seq 1 5`; do echo $x; sleep 1; done"]
container = image.run_via_binary(command=command)
for line in container.logs(follow=True):
print(line)
This will output
.. code-block:: none
'1' '\n' '2' '\n' '3' '\n' '4' '\n' '5' '\n'
:param follow: bool, provide new logs as they come
:return: iterator (of str)
"""
# TODO: podman logs have different behavior than docker
follow = ["--follow"] if follow else []
cmdline = ["podman", "logs"] + follow + [self._id or self.get_id()]
output = run_cmd(cmdline, return_output=True)
return output | Get logs from this container. The iterator yields one log line followed by a newline in the next item.
The logs are NOT encoded (they are str, not bytes).
Let's look at an example::
image = conu.PodmanImage("fedora", tag="27")
command = ["bash", "-c", "for x in `seq 1 5`; do echo $x; sleep 1; done"]
container = image.run_via_binary(command=command)
for line in container.logs(follow=True):
print(line)
This will output
.. code-block:: none
'1' '\n' '2' '\n' '3' '\n' '4' '\n' '5' '\n'
:param follow: bool, provide new logs as they come
:return: iterator (of str) |
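A minimal standalone sketch of streaming container logs line by line with subprocess, relevant to the follow=True case above; this is an illustration, not conu's actual implementation, and it assumes a local podman binary.
import subprocess

def stream_logs(container_id, follow=False):
    # Assumed invocation mirroring the cmdline built above
    cmd = ["podman", "logs"] + (["--follow"] if follow else []) + [container_id]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
    for line in proc.stdout:
        yield line  # each item is one log line with its trailing newline
    proc.wait()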
def get_genericpage(cls, kb_app):
""" Return the one class if configured, otherwise default """
# Presumes the registry has been committed
q = dectate.Query('genericpage')
klasses = sorted(q(kb_app), key=lambda args: args[0].order)
if not klasses:
# The site doesn't configure a genericpage, fall back to the default
return Genericpage
else:
return klasses[0][1] | Return the one class if configured, otherwise default |
def _check_dtype(self, dtype):
"""Check if dtype string is valid and return ctype string."""
try:
return _ffi_types[dtype]
except KeyError:
raise ValueError("dtype must be one of {0!r} and not {1!r}".format(
sorted(_ffi_types.keys()), dtype)) | Check if dtype string is valid and return ctype string. |
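For illustration, a self-contained sketch of the same lookup-with-helpful-error pattern; the _ffi_types mapping below is an assumption, not the library's actual table.
_ffi_types = {'float64': 'double', 'float32': 'float', 'int32': 'int', 'int16': 'short'}

def check_dtype(dtype):
    try:
        return _ffi_types[dtype]
    except KeyError:
        raise ValueError("dtype must be one of {0!r} and not {1!r}".format(
            sorted(_ffi_types.keys()), dtype))

print(check_dtype('float32'))  # -> 'float'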
def saveData(self, dataOutputFile, categoriesOutputFile):
"""
Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful.
"""
if self.records is None:
return False
if not dataOutputFile.endswith("csv"):
raise TypeError("data output file must be csv.")
if not categoriesOutputFile.endswith("json"):
raise TypeError("category output file must be json")
# Ensure directory exists
dataOutputDirectory = os.path.dirname(dataOutputFile)
if not os.path.exists(dataOutputDirectory):
os.makedirs(dataOutputDirectory)
categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
if not os.path.exists(categoriesOutputDirectory):
os.makedirs(categoriesOutputDirectory)
with open(dataOutputFile, "w") as f:
# Header
writer = csv.DictWriter(f, fieldnames=self.fieldNames)
writer.writeheader()
# Types
writer.writerow(self.types)
# Special characters
writer.writerow(self.specials)
for data in self.records:
for record in data:
writer.writerow(record)
with open(categoriesOutputFile, "w") as f:
f.write(json.dumps(self.categoryToId,
sort_keys=True,
indent=4,
separators=(",", ": ")))
return dataOutputFile | Save the processed data and the associated category mapping.
@param dataOutputFile (str) Location to save data
@param categoriesOutputFile (str) Location to save category map
@return (str) Path to the saved data file iff
saveData() is successful. |
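A minimal sketch of the three-header-row CSV layout written above (field names, then types, then special flags); the field names and flag values are made up for illustration.
import csv, io

fieldNames = ["token", "_category", "_sequenceId"]
types = {"token": "str", "_category": "list", "_sequenceId": "int"}
specials = {"token": "", "_category": "C", "_sequenceId": "S"}
buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=fieldNames)
writer.writeheader()
writer.writerow(types)      # second row: column types
writer.writerow(specials)   # third row: special-character flags
writer.writerow({"token": "hello", "_category": "[0]", "_sequenceId": 0})
print(buf.getvalue())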
def cli(env, sortby, columns, datacenter, username, storage_type):
"""List block storage."""
block_manager = SoftLayer.BlockStorageManager(env.client)
block_volumes = block_manager.list_block_volumes(datacenter=datacenter,
username=username,
storage_type=storage_type,
mask=columns.mask())
table = formatting.Table(columns.columns)
table.sortby = sortby
for block_volume in block_volumes:
table.add_row([value or formatting.blank()
for value in columns.row(block_volume)])
env.fout(table) | List block storage. |
def processing_blocks(self):
"""Return the a JSON dict encoding the PBs known to SDP."""
pb_list = ProcessingBlockList()
# TODO(BMo) realtime, offline etc.
return json.dumps(dict(active=pb_list.active,
completed=pb_list.completed,
aborted=pb_list.aborted)) | Return a JSON dict encoding the PBs known to SDP. |
def to_pandas_df(self, column_names=None, selection=None, strings=True, virtual=False, index_name=None):
"""Return a pandas DataFrame containing the ndarray corresponding to the evaluated data
If index is given, that column is used for the index of the dataframe.
Example
>>> df_pandas = df.to_pandas_df(["x", "y", "z"])
>>> df_copy = vaex.from_pandas(df_pandas)
:param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param index_name: if this column is given it is used for the index of the DataFrame
:return: pandas.DataFrame object
"""
import pandas as pd
data = self.to_dict(column_names=column_names, selection=selection, strings=strings, virtual=virtual)
if index_name is not None:
if index_name in data:
index = data.pop(index_name)
else:
index = self.evaluate(index_name, selection=selection)
else:
index = None
df = pd.DataFrame(data=data, index=index)
if index is not None:
df.index.name = index_name
return df | Return a pandas DataFrame containing the ndarray corresponding to the evaluated data
If index is given, that column is used for the index of the dataframe.
Example
>>> df_pandas = df.to_pandas_df(["x", "y", "z"])
>>> df_copy = vaex.from_pandas(df_pandas)
:param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param index_name: if this column is given it is used for the index of the DataFrame
:return: pandas.DataFrame object |
def get_fc2(supercell,
symmetry,
dataset,
atom_list=None,
decimals=None):
"""Force constants are computed.
Force constants, Phi, are calculated from sets of forces, F, and
atomic displacement, d:
Phi = -F / d
This is solved by matrix pseudo-inversion.
Crystal symmetry is included when creating F and d matrices.
Returns
-------
ndarray
Force constants[ i, j, a, b ]
i: Atom index of finitely displaced atom.
j: Atom index at which force on the atom is measured.
a, b: Cartesian direction indices = (0, 1, 2) for i and j, respectively
dtype=double
shape=(len(atom_list),n_satom,3,3),
"""
if atom_list is None:
fc_dim0 = supercell.get_number_of_atoms()
else:
fc_dim0 = len(atom_list)
force_constants = np.zeros((fc_dim0,
supercell.get_number_of_atoms(),
3, 3), dtype='double', order='C')
# Fill force_constants[ displaced_atoms, all_atoms_in_supercell ]
atom_list_done = _get_force_constants_disps(
force_constants,
supercell,
dataset,
symmetry,
atom_list=atom_list)
rotations = symmetry.get_symmetry_operations()['rotations']
lattice = np.array(supercell.get_cell().T, dtype='double', order='C')
permutations = symmetry.get_atomic_permutations()
distribute_force_constants(force_constants,
atom_list_done,
lattice,
rotations,
permutations,
atom_list=atom_list)
if decimals:
force_constants = force_constants.round(decimals=decimals)
return force_constants | Force constants are computed.
Force constants, Phi, are calculated from sets of forces, F, and
atomic displacement, d:
Phi = -F / d
This is solved by matrix pseudo-inversion.
Crystal symmetry is included when creating F and d matrices.
Returns
-------
ndarray
Force constants[ i, j, a, b ]
i: Atom index of finitely displaced atom.
j: Atom index at which force on the atom is measured.
a, b: Cartesian direction indices = (0, 1, 2) for i and j, respectively
dtype=double
shape=(len(atom_list),n_satom,3,3), |
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
"""Sync most recent file by date, time attribues"""
files = command.list_files(*filters, remote_dir=remote_dir)
most_recent = sorted(files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]
_notify_sync(Direction.down, to_sync)
down_by_files(to_sync[::-1], local_dir=local_dir) | Sync most recent files by date/time attributes |
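A standalone sketch of the "keep the N most recent files" selection used above, with a stand-in namedtuple instead of the library's file objects.
from collections import namedtuple
from datetime import datetime

FileInfo = namedtuple("FileInfo", "name datetime")
files = [FileInfo("a.jpg", datetime(2021, 1, 1)),
         FileInfo("b.jpg", datetime(2021, 3, 1)),
         FileInfo("c.jpg", datetime(2021, 2, 1))]
count = 2
most_recent = sorted(files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]        # the `count` newest files
print([f.name for f in to_sync])      # ['c.jpg', 'b.jpg']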
def merge(cls, *args, **kwargs):
"""Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys.
"""
newkeys = bool(kwargs.get('newkeys', False))
ignore = kwargs.get('ignore', list())
if len(args) < 1:
raise ValueError('no ents given to Ent.merge()')
elif not all(isinstance(s, Ent) for s in args):
raise ValueError('all positional arguments to Ent.merge() must '
'be instances of Ent')
ent = args[0]
data = cls.load(ent)
for ent in args[1:]:
for key, value in ent.__dict__.items():
if key in ignore:
continue
if key in data.__dict__:
v1 = data.__dict__[key]
if type(value) == type(v1):
if isinstance(v1, Ent):
data.__dict__[key] = cls.merge(v1, value, **kwargs)
else:
data.__dict__[key] = cls.load(value)
elif newkeys:
data.__dict__[key] = value
return data | Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys. |
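A plain-dict sketch of the merge semantics described above (later values win, type mismatches are skipped, newkeys controls unknown keys); it does not use Ent itself.
def merge_dicts(*dicts, newkeys=False, ignore=()):
    result = dict(dicts[0])
    for d in dicts[1:]:
        for key, value in d.items():
            if key in ignore:
                continue
            if key in result:
                if type(value) == type(result[key]):
                    result[key] = value
            elif newkeys:
                result[key] = value
    return result

print(merge_dicts({'a': 1, 'b': 'x'}, {'a': 2, 'b': 3, 'c': 9}, newkeys=True))
# {'a': 2, 'b': 'x', 'c': 9} -- 'b' kept because 3 is not a str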
def apply_policy(self, policy):
"""Apply a firewall policy. """
tenant_name = policy['tenant_name']
fw_id = policy['fw_id']
fw_name = policy['fw_name']
LOG.info("asa_apply_policy: tenant=%(tenant)s fw_id=%(fw_id)s "
"fw_name=%(fw_name)s",
{'tenant': tenant_name, 'fw_id': fw_id, 'fw_name': fw_name})
cmds = ["conf t", "changeto context " + tenant_name]
for rule_id, rule in policy['rules'].items():
acl = self.build_acl(tenant_name, rule)
LOG.info("rule[%(rule_id)s]: name=%(name)s enabled=%(enabled)s"
" protocol=%(protocol)s dport=%(dport)s "
"sport=%(sport)s dip=%(dport)s "
"sip=%(sip)s action=%(dip)s",
{'rule_id': rule_id, 'name': rule.get('name'),
'enabled': rule.get('enabled'),
'protocol': rule.get('protocol'),
'dport': rule.get('dst_port'),
'sport': rule.get('src_port'),
'dip': rule.get('destination_ip_address'),
'sip': rule.get('source_ip_address'),
'action': rule.get('action')})
# remove the old ace for this rule
if rule_id in self.rule_tbl:
cmds.append('no ' + self.rule_tbl[rule_id])
self.rule_tbl[rule_id] = acl
if tenant_name in self.tenant_rule:
if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:
self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)
cmds.append(acl)
cmds.append("access-group " + tenant_name + " global")
cmds.append("write memory")
LOG.info("cmds sent is %s", cmds)
data = {"commands": cmds}
return self.rest_send_cli(data) | Apply a firewall policy. |
def run(file, access_key, secret_key, **kwargs):
"""命令行运行huobitrade"""
if file:
import sys
file_path, file_name = os.path.split(file)
sys.path.append(file_path)
strategy_module = importlib.import_module(os.path.splitext(file_name)[0])
init = getattr(strategy_module, 'init', None)
handle_func = getattr(strategy_module, 'handle_func', None)
schedule = getattr(strategy_module, 'schedule', None)
else:
init, handle_func, schedule = [None] * 3
setKey(access_key, secret_key)
url = kwargs.get('url')
hostname = 'api.huobi.br.com'
if url:
hostname = urlparse(url).hostname
setUrl('https://' + hostname, 'https://' + hostname)
reconn = kwargs.get('reconn', -1)
from huobitrade import HBWebsocket, HBRestAPI
from huobitrade.datatype import HBMarket, HBAccount, HBMargin
restapi = HBRestAPI(get_acc=True)
ws = HBWebsocket(host=hostname, reconn=reconn)
auth_ws = HBWebsocket(host=hostname, auth=True, reconn=reconn)
data = HBMarket()
account = HBAccount()
margin = HBMargin()
ws_open = False
ws_auth = False
@ws.after_open
def _open():
nonlocal ws_open
click.echo('行情接口连接成功')
ws_open = True
@auth_ws.after_auth
def _auth():
nonlocal ws_auth
click.echo('鉴权接口鉴权成功')
ws_auth = True
ws.run()
auth_ws.run()
for i in range(10):
time.sleep(3)
click.echo(f'连接:第{i+1}次连接')
if ws_open&ws_auth:
break
else:
ws.stop()
auth_ws.stop()
raise Exception('连接失败')
if init:
init(restapi, ws, auth_ws)
if handle_func:
for k, v in handle_func.items():
if k.split('.')[0].lower() == 'market':
ws.register_handle_func(k)(v)
else:
auth_ws.register_handle_func(k)(v)
if schedule:
print('testing')
from huobitrade.handler import TimeHandler
interval = schedule.__kwdefaults__['interval']
timerhandler = TimeHandler('scheduler', interval)
timerhandler.handle = lambda msg: schedule(restapi, ws, auth_ws)
timerhandler.start()
while True:
try:
code = click.prompt('huobitrade>>')
if code == 'exit':
if click.confirm('是否要退出huobitrade'):
break
else:
continue
else:
result = eval(code)
click.echo(result)
except Exception as e:
click.echo(traceback.format_exc())
ws.stop()
auth_ws.stop() | 命令行运行huobitrade |
def set_owner(obj_name, principal, obj_type='file'):
'''
Set the owner of an object. This can be a file, folder, registry key,
printer, service, etc...
Args:
obj_name (str):
The object for which to set owner. This can be the path to a file or
folder, a registry key, printer, etc. For more information about how
to format the name see:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx
principal (str):
The name of the user or group to make owner of the object. Can also
pass a SID.
obj_type (Optional[str]):
The type of object for which to set the owner. Default is ``file``
Returns:
bool: True if successful, raises an error otherwise
Usage:
.. code-block:: python
salt.utils.win_dacl.set_owner('C:\\MyDirectory', 'jsnuffy', 'file')
'''
sid = get_sid(principal)
obj_flags = flags()
# Validate obj_type
if obj_type.lower() not in obj_flags.obj_type:
raise SaltInvocationError(
'Invalid "obj_type" passed: {0}'.format(obj_type))
if 'registry' in obj_type.lower():
obj_name = dacl().get_reg_name(obj_name)
# To set the owner to something other than the logged in user requires
# SE_TAKE_OWNERSHIP_NAME and SE_RESTORE_NAME privileges
# Enable them for the logged in user
# Setup the privilege set
new_privs = set()
luid = win32security.LookupPrivilegeValue('', 'SeTakeOwnershipPrivilege')
new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))
luid = win32security.LookupPrivilegeValue('', 'SeRestorePrivilege')
new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))
# Get the current token
p_handle = win32api.GetCurrentProcess()
t_handle = win32security.OpenProcessToken(
p_handle,
win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES)
# Enable the privileges
win32security.AdjustTokenPrivileges(t_handle, 0, new_privs)
# Set the user
try:
win32security.SetNamedSecurityInfo(
obj_name,
obj_flags.obj_type[obj_type.lower()],
obj_flags.element['owner'],
sid,
None, None, None)
except pywintypes.error as exc:
log.exception('Failed to make %s the owner: %s', principal, exc)
raise CommandExecutionError(
'Failed to set owner: {0}'.format(obj_name), exc.strerror)
return True | Set the owner of an object. This can be a file, folder, registry key,
printer, service, etc...
Args:
obj_name (str):
The object for which to set owner. This can be the path to a file or
folder, a registry key, printer, etc. For more information about how
to format the name see:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx
principal (str):
The name of the user or group to make owner of the object. Can also
pass a SID.
obj_type (Optional[str]):
The type of object for which to set the owner. Default is ``file``
Returns:
bool: True if successful, raises an error otherwise
Usage:
.. code-block:: python
salt.utils.win_dacl.set_owner('C:\\MyDirectory', 'jsnuffy', 'file') |
def _update_service_profile(self, handle, service_profile,
vlan_id, ucsm_ip):
"""Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in.
"""
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
eth_port_paths = ["%s%s" % (service_profile, ep)
for ep in virtio_port_list]
vlan_name = self.make_vlan_name(vlan_id)
try:
handle.StartTransaction()
obj = handle.GetManagedObject(
None,
self.ucsmsdk.LsServer.ClassId(),
{self.ucsmsdk.LsServer.DN: service_profile})
if not obj:
LOG.debug('UCS Manager network driver could not find '
'Service Profile %s in UCSM %s',
service_profile, ucsm_ip)
return False
for eth_port_path in eth_port_paths:
eth = handle.GetManagedObject(
obj, self.ucsmsdk.VnicEther.ClassId(),
{self.ucsmsdk.VnicEther.DN: eth_port_path}, True)
if eth:
vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX +
vlan_name)
eth_if = handle.AddManagedObject(eth,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_path,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
if not eth_if:
LOG.debug('UCS Manager network driver could not '
'update Service Profile %s with vlan %d',
service_profile, vlan_id)
return False
else:
LOG.debug('UCS Manager network driver did not find '
'ethernet port at %s', eth_port_path)
handle.CompleteTransaction()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'Service Profile',
vlan_name, ucsm_ip) | Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in. |
def get_max_value(self):
""" Get the maximum value """
value = self.get_default_value()
if self.attribute_type is str:
max_value = value.ljust(self.max_length + 1, 'a')
elif self.attribute_type is int:
max_value = self.max_length + 1
else:
raise TypeError('Attribute %s can not have a maximum value' % self.local_name)
return max_value | Get the maximum value |
def readLocationElement(self, locationElement):
""" Format 0 location reader """
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
loc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
# In case the document contains no axis definitions,
self.log.warning("Location with undefined axis: \"%s\".", dimName)
continue
xValue = yValue = None
try:
xValue = dimensionElement.attrib.get('xvalue')
xValue = float(xValue)
except ValueError:
self.log.warning("KeyError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get('yvalue')
if yValue is not None:
yValue = float(yValue)
except ValueError:
pass
if yValue is not None:
loc[dimName] = (xValue, yValue)
else:
loc[dimName] = xValue
return loc | Format 0 location reader |
def get_security_attributes_for_user(user=None):
"""
Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified).
"""
if user is None:
user = get_current_user()
assert isinstance(user, security.TOKEN_USER), (
"user must be TOKEN_USER instance")
SD = security.SECURITY_DESCRIPTOR()
SA = security.SECURITY_ATTRIBUTES()
# by attaching the actual security descriptor, it will be garbage-
# collected with the security attributes
SA.descriptor = SD
SA.bInheritHandle = 1
ctypes.windll.advapi32.InitializeSecurityDescriptor(
ctypes.byref(SD),
security.SECURITY_DESCRIPTOR.REVISION)
ctypes.windll.advapi32.SetSecurityDescriptorOwner(
ctypes.byref(SD),
user.SID, 0)
return SA | Return a SECURITY_ATTRIBUTES structure with the SID set to the
specified user (uses current user if none is specified). |
def recorddiff(a, b, buffersize=None, tempdir=None, cache=True, strict=False):
"""
Find the difference between records in two tables. E.g.::
>>> import petl as etl
>>> a = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> b = [['bar', 'foo', 'baz'],
... [2, 'B', False],
... [9, 'A', False],
... [3, 'B', True],
... [9, 'C', True]]
>>> added, subtracted = etl.recorddiff(a, b)
>>> added
+-----+-----+-------+
| bar | foo | baz |
+=====+=====+=======+
| 3 | 'B' | True |
+-----+-----+-------+
| 9 | 'A' | False |
+-----+-----+-------+
>>> subtracted
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
Convenient shorthand for
``(recordcomplement(b, a), recordcomplement(a, b))``. See also
:func:`petl.transform.setops.recordcomplement`.
See also the discussion of the `buffersize`, `tempdir` and `cache`
arguments under the :func:`petl.transform.sorts.sort` function.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used.
"""
added = recordcomplement(b, a, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
subtracted = recordcomplement(a, b, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
return added, subtracted | Find the difference between records in two tables. E.g.::
>>> import petl as etl
>>> a = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> b = [['bar', 'foo', 'baz'],
... [2, 'B', False],
... [9, 'A', False],
... [3, 'B', True],
... [9, 'C', True]]
>>> added, subtracted = etl.recorddiff(a, b)
>>> added
+-----+-----+-------+
| bar | foo | baz |
+=====+=====+=======+
| 3 | 'B' | True |
+-----+-----+-------+
| 9 | 'A' | False |
+-----+-----+-------+
>>> subtracted
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
Convenient shorthand for
``(recordcomplement(b, a), recordcomplement(a, b))``. See also
:func:`petl.transform.setops.recordcomplement`.
See also the discussion of the `buffersize`, `tempdir` and `cache`
arguments under the :func:`petl.transform.sorts.sort` function.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used. |
def run_interrupted(self):
"""
Runs custodian in an interrupted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") | Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable error occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached |
def get_date_822():
"""return output of 822-date command"""
cmd = '/bin/date'
if not os.path.exists(cmd):
raise ValueError('%s command does not exist.'%cmd)
args = [cmd,'-R']
result = get_cmd_stdout(args).strip()
result = normstr(result)
return result | return output of 822-date command |
def qn_to_qubo(expr):
"""Convert Sympy's expr to QUBO.
Args:
expr: Sympy's quadratic expression with variable `q0`, `q1`, ...
Returns:
[[float]]: Returns QUBO matrix.
"""
try:
import sympy
except ImportError:
raise ImportError("This function requires sympy. Please install it.")
assert type(expr) == sympy.Add
to_i = lambda s: int(str(s)[1:])
max_i = max(map(to_i, expr.free_symbols)) + 1
qubo = [[0.] * max_i for _ in range(max_i)]
for arg in expr.args:
syms = arg.free_symbols
assert len(syms) <= 2
if len(syms) == 2:
assert type(arg) == sympy.Mul
i, j = list(map(to_i, syms))
if i > j:
i, j = j, i
if i == j:
if len(arg.args) == 2:
qubo[i][i] = float(arg.args[0])
elif len(arg.args) == 1:
qubo[i][i] = 1.0
else:
raise ValueError(f"Too many args! arg.args = {arg.args}")
continue
if len(arg.args) == 3:
qubo[i][j] = float(arg.args[0])
elif len(arg.args) == 2:
qubo[i][j] = 1.0  # plain q_i*q_j term with an implicit coefficient of 1
if len(syms) == 1:
if len(arg.args) == 2:
assert type(arg) == sympy.Mul
i = to_i(next(iter(syms)))
qubo[i][i] = float(arg.args[0])
elif len(arg.args) == 1:
qubo[i][i] = 1.0
else:
raise ValueError(f"Too many args! arg.args = {arg.args}")
return qubo | Convert Sympy's expr to QUBO.
Args:
expr: Sympy's quadratic expression with variable `q0`, `q1`, ...
Returns:
[[float]]: Returns QUBO matrix. |
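A hedged usage sketch of qn_to_qubo (requires sympy); it shows how coefficients of linear and quadratic terms land in the upper-triangular QUBO matrix.
import sympy
q0, q1 = sympy.symbols('q0 q1')
expr = 2*q0 + 3*q1 + 4*q0*q1
print(qn_to_qubo(expr))   # [[2.0, 4.0], [0.0, 3.0]]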
def parse_env(self, env=None, namespace=None):
"""Parse environment variables."""
env = env or os.environ
results = {}
if not namespace:
namespace = self.prog
namespace = namespace.upper() # pylint: disable=no-member
for option in self._options:
env_var = option.kwargs.get('env')
default_env = "%s_%s" % (namespace, option.name.upper())
if env_var and env_var in env:
value = env[env_var]
results[option.dest] = option.type(value)
elif default_env in env:
value = env[default_env]
results[option.dest] = option.type(value)
return results | Parse environment variables. |
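A self-contained sketch of the same environment-variable convention, where an explicit env name wins and "PROG_OPTION" is the fallback; the option layout here is hypothetical.
import os

def parse_env_vars(options, prog, env=None):
    env = env or os.environ
    results = {}
    for name, (env_var, cast) in options.items():
        default_env = "%s_%s" % (prog.upper(), name.upper())
        if env_var and env_var in env:
            results[name] = cast(env[env_var])
        elif default_env in env:
            results[name] = cast(env[default_env])
    return results

os.environ['MYAPP_PORT'] = '8080'
print(parse_env_vars({'port': (None, int)}, 'myapp'))   # {'port': 8080}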
def _get_fields(self):
"""
Used by str, unicode, repr and __reduce__.
Returns only the fields necessary to reconstruct the Interval.
:return: reconstruction info
:rtype: tuple
"""
if self.data is not None:
return self.begin, self.end, self.data
else:
return self.begin, self.end | Used by str, unicode, repr and __reduce__.
Returns only the fields necessary to reconstruct the Interval.
:return: reconstruction info
:rtype: tuple |
def reorient(self, up, look):
'''
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
'''
from blmath.geometry.transform import rotation_from_up_and_look
from blmath.numerics import as_numeric_array
up = as_numeric_array(up, (3,))
look = as_numeric_array(look, (3,))
if self.v is not None:
self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T | Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera). |
def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c() | Process callbacks from `call_from_executor` in eventloop. |
def get_line_value(self, context_type):
"""
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
"""
if context_type.upper() == "ENV":
return self.line_envs
elif context_type.upper() == "LABEL":
return self.line_labels | Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line |
def peek_assoc(store, container, _stack=None):
"""
Deserialize association lists.
"""
assoc = []
try:
if store.getRecordAttr('key', container) == 'escaped':
for i in container:
assoc.append(store.peek(i, container, _stack=_stack))
else:
for i in container:
assoc.append((store.strRecord(i, container), store.peek(i, container, _stack=_stack)))
#print(assoc) # debugging
except TypeError as e:
try:
for i in container:
pass
raise e
except TypeError:
raise TypeError("container is not iterable; peek is not compatible\n\t{}".format(e.args[0]))
return assoc | Deserialize association lists. |
async def _post(self, zone_id: int = None, json: dict = None) -> dict:
"""Post data to a (non)existing zone."""
return await self._request(
'post', 'zone/{0}/properties'.format(zone_id), json=json) | Post data to a (non)existing zone. |
def thumbUrl(self):
""" Return the first first thumbnail url starting on
the most specific thumbnail for that item.
"""
thumb = self.firstAttr('thumb', 'parentThumb', 'grandparentThumb')
return self._server.url(thumb, includeToken=True) if thumb else None | Return the first thumbnail url starting on
the most specific thumbnail for that item. |
def retry(ex=RETRIABLE, tries=4, delay=5, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ex: the exception to check. may be a tuple of exceptions to check
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds.
A random 0-5s will be added to this number to stagger calls.
:param backoff: backoff multiplier e.g. value of 2 will double the delay each retry
:param logger: logger to use. If None, print
"""
def deco_retry(func):
"""@retry(arg[, ...]) -> true decorator"""
@wraps(func)
def f_retry(*args, **kwargs):
"""true decorator -> decorated function"""
mtries, mdelay = tries, delay
while mtries > 1:
try:
return func(*args, **kwargs)
except ex as error:
sleeping = mdelay + randint(0, 5)
msg = "%s, Retrying in %d seconds..." % (str(error), sleeping)
if logger:
logger.warning(msg)
sleep(sleeping)
mtries -= 1
mdelay *= backoff
return func(*args, **kwargs)
return f_retry # true decorator
return deco_retry | Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ex: the exception to check. may be a tuple of exceptions to check
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds.
A random 0-5s will be added to this number to stagger calls.
:param backoff: backoff multiplier e.g. value of 2 will double the delay each retry
:param logger: logger to use. If None, print |
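A hedged usage sketch of the retry decorator above; flaky_fetch and its failure mode are made up for illustration.
import random

@retry(ex=ConnectionError, tries=3, delay=1, backoff=2)
def flaky_fetch():
    if random.random() < 0.5:
        raise ConnectionError("transient network error")
    return "payload"

print(flaky_fetch())   # retried with growing delays on transient errors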
def set_job(self, key, func, args):
"""
Get a scheduled task or set if none exists.
Returns:
- task coroutine/continuation
"""
res, pk = key
jobs, lock = self._jobs
task = _tasks.UpdateTask(func(*args), key)
with lock:
job = jobs[res].get(pk)
had = bool(job)
if not job:
job = task
jobs[res][pk] = job
else:
task.cancel()
self._log.debug('Scheduling: %s-%s (%s)', res.tag, pk,
'new task' if not had else 'dup')
return job | Get a scheduled task or set if none exists.
Returns:
- task coroutine/continuation |
def generate_strip_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to process prepared files
for use with the tacl ngrams command."""
parser = subparsers.add_parser(
'strip', description=constants.STRIP_DESCRIPTION,
epilog=constants.STRIP_EPILOG, formatter_class=ParagraphFormatter,
help=constants.STRIP_HELP)
parser.set_defaults(func=strip_files)
utils.add_common_arguments(parser)
parser.add_argument('input', help=constants.STRIP_INPUT_HELP,
metavar='INPUT')
parser.add_argument('output', help=constants.STRIP_OUTPUT_HELP,
metavar='OUTPUT') | Adds a sub-command parser to `subparsers` to process prepared files
for use with the tacl ngrams command. |
def get_attached_pipettes(self):
""" Mimic the behavior of robot.get_attached_pipettes"""
api = object.__getattribute__(self, '_api')
instrs = {}
for mount, data in api.attached_instruments.items():
instrs[mount.name.lower()] = {
'model': data.get('name', None),
'id': data.get('pipette_id', None),
'mount_axis': Axis.by_mount(mount),
'plunger_axis': Axis.of_plunger(mount)
}
if data.get('name'):
instrs[mount.name.lower()]['tip_length'] \
= data.get('tip_length', None)
return instrs | Mimic the behavior of robot.get_attached_pipettes |
def run(self,evloop=None):
"""
Runs the application main loop.
This method is blocking and needs to be called from the main thread to avoid OpenGL bugs that can occur.
``evloop`` may optionally be a subclass of :py:class:`pyglet.app.base.EventLoop` to replace the default event loop.
"""
self.sendEvent("peng3d:peng.run",{"peng":self,"window":self.window,"evloop":evloop})
self.window.run(evloop)
self.sendEvent("peng3d:peng.exit",{"peng":self}) | Runs the application main loop.
This method is blocking and needs to be called from the main thread to avoid OpenGL bugs that can occur.
``evloop`` may optionally be a subclass of :py:class:`pyglet.app.base.EventLoop` to replace the default event loop. |
def _read_opt_pad(self, code, *, desc):
"""Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding
"""
_type = self._read_opt_type(code)
if code == 0:
opt = dict(
desc=desc,
type=_type,
length=1,
)
elif code == 1:
_size = self._read_unpack(1)
_padn = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
padding=_padn,
)
else:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
return opt | Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding |
def store(self, store_item):
"""
Store for tweets and user information. Must have all required information and types
"""
required_keys = {"type": str, "timestamp": float}
if not isinstance(store_item, dict):
raise TypeError("The stored item should be a dict")
for k, v in required_keys.items():
if k not in store_item:
raise AttributeError("{} is not available. Please add it.".format(k))
if not isinstance(store_item[k], v):
raise TypeError("{} is not a {}. Please change it. ".format(k, v))
#
# # TODO: CREATE FILTER FOR PERSISTENCE METHOD. Make sure it has the necessary data
# to_store = {'field1': thing.field1,
# 'date_field': thing.date_field,
# }
# to_store['stuff'] = Binary(cPickle.dumps(thing.stuff))
# Respect any soft-quota on write - raises if stats().totals.size > quota
self._arctic_lib.check_quota()
self._collection.update(store_item, store_item, upsert=True) | Store for tweets and user information. Must have all required information and types |
def lock(self, key, client):
"""Set the key that will be used to ensure messages come from one party
Args:
key (string): The key used to validate future messages
client (string): A string that will be returned to indicate who
locked this device.
"""
self.key = key
self.client = client | Set the key that will be used to ensure messages come from one party
Args:
key (string): The key used to validate future messages
client (string): A string that will be returned to indicate who
locked this device. |
def PSLLDQ(cpu, dest, src):
""" Packed Shift Left Logical Double Quadword
Shifts the destination operand (first operand) to the left by the number
of bytes specified in the count operand (second operand). The empty low-order
bytes are cleared (set to all 0s). If the value specified by the count
operand is greater than 15, the destination operand is set to all 0s.
The destination operand is an XMM register. The count operand is an 8-bit
immediate.
TEMP = COUNT;
if (TEMP > 15) TEMP = 16;
DEST = DEST << (TEMP * 8);
"""
count = Operators.ZEXTEND(src.read(), dest.size * 2)
byte_count = Operators.ITEBV(src.size * 2, count > 15, 16, count)
bit_count = byte_count * 8
val = Operators.ZEXTEND(dest.read(), dest.size * 2)
val = val << (Operators.ZEXTEND(bit_count, dest.size * 2))
dest.write(Operators.EXTRACT(val, 0, dest.size)) | Packed Shift Left Logical Double Quadword
Shifts the destination operand (first operand) to the left by the number
of bytes specified in the count operand (second operand). The empty low-order
bytes are cleared (set to all 0s). If the value specified by the count
operand is greater than 15, the destination operand is set to all 0s.
The destination operand is an XMM register. The count operand is an 8-bit
immediate.
TEMP = COUNT;
if (TEMP > 15) TEMP = 16;
DEST = DEST << (TEMP * 8); |
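A plain-Python sketch of the PSLLDQ semantics documented above, using an int in place of a 128-bit XMM register; it is not Manticore's symbolic implementation.
def pslldq(value_128, count):
    count = 16 if count > 15 else count
    return (value_128 << (count * 8)) & ((1 << 128) - 1)

print(hex(pslldq(0x0102030405060708090a0b0c0d0e0f10, 2)))
# 0x30405060708090a0b0c0d0e0f100000 -- top two bytes dropped, two zero bytes shifted in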
def calculate_overlap(self):
"""Create the array that describes how junctions overlap"""
overs = []
if not self.tx_obj1.range.overlaps(self.tx_obj2.range): return [] # if they dont overlap wont find anything
for i in range(0,len(self.j1)):
for j in range(0,len(self.j2)):
if self.j1[i].overlaps(self.j2[j],tolerance=self.tolerance):
overs.append([i,j])
return overs | Create the array that describes how junctions overlap |
def compute(self, bottomUpInput, enableLearn, enableInference=None):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.
"""
# The C++ TM takes 32 bit floats as input. uint32 works as well since the
# code only checks whether elements are non-zero
assert (bottomUpInput.dtype == numpy.dtype('float32')) or \
(bottomUpInput.dtype == numpy.dtype('uint32')) or \
(bottomUpInput.dtype == numpy.dtype('int32'))
self.iterationIdx = self.iterationIdx + 1
# As a speed optimization for now (until we need online learning), skip
# computing the inference output while learning
if enableInference is None:
if enableLearn:
enableInference = False
else:
enableInference = True
# ====================================================================
# Run compute and retrieve selected state and member variables
self._setStatePointers()
y = self.cells4.compute(bottomUpInput, enableInference, enableLearn)
self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn))
self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength()
self._copyAllocatedStates()
# ========================================================================
# Update the prediction score stats
# Learning always includes inference
if self.collectStats:
activeColumns = bottomUpInput.nonzero()[0]
if enableInference:
predictedState = self.infPredictedState['t-1']
else:
predictedState = self.lrnPredictedState['t-1']
self._updateStatsInferEnd(self._internalStats,
activeColumns,
predictedState,
self.colConfidence['t-1'])
# Finally return the TM output
output = self._computeOutput()
# Print diagnostic information based on the current verbosity level
self.printComputeEnd(output, learn=enableLearn)
self.resetCalled = False
return output | Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`. |
def authenticate(self, provider=None, identifier=None):
"Fetch user for a given provider by id."
provider_q = Q(provider__name=provider)
if isinstance(provider, Provider):
provider_q = Q(provider=provider)
try:
access = AccountAccess.objects.filter(
provider_q, identifier=identifier
).select_related('user')[0]
except IndexError:
return None
else:
return access.user | Fetch user for a given provider by id. |
def _generate_throw_error(self, name, reason):
"""Emits a generic error throwing line."""
throw_exc = '@throw([NSException exceptionWithName:@"{}" reason:{} userInfo:nil]);'
self.emit(throw_exc.format(name, reason)) | Emits a generic error throwing line. |
def FinalizeTaskStorage(self, task):
"""Finalizes a processed task storage.
Args:
task (Task): task.
Raises:
IOError: if the task storage does not exist.
OSError: if the task storage does not exist.
"""
if task.identifier not in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} does not exist.'.format(
task.identifier)) | Finalizes a processed task storage.
Args:
task (Task): task.
Raises:
IOError: if the task storage does not exist.
OSError: if the task storage does not exist. |
def aggregation_not_used_text_element(feature, parent):
"""Retrieve reference title header string from definitions."""
_ = feature, parent # NOQA
header = aggregation_not_used_text['string_format']
return header.capitalize() | Retrieve reference title header string from definitions. |
def safe_cast(invar, totype):
"""Performs a "safe" typecast.
Ensures that `invar` properly casts to `totype`. Checks after
casting that the result is actually of type `totype`. Any exceptions raised
by the typecast itself are unhandled.
Parameters
----------
invar
(arbitrary) -- Value to be typecast.
totype
|type| -- Type to which `invar` is to be cast.
Returns
-------
outvar
`type 'totype'` -- Typecast version of `invar`
Raises
------
~exceptions.TypeError
If result of typecast is not of type `totype`
"""
# Make the typecast. Just use Python built-in exceptioning
outvar = totype(invar)
# Check that the cast type matches
if not isinstance(outvar, totype):
raise TypeError("Result of cast to '{0}' is '{1}'"
.format(totype, type(outvar)))
## end if
# Success; return the cast value
return outvar | Performs a "safe" typecast.
Ensures that `invar` properly casts to `totype`. Checks after
casting that the result is actually of type `totype`. Any exceptions raised
by the typecast itself are unhandled.
Parameters
----------
invar
(arbitrary) -- Value to be typecast.
totype
|type| -- Type to which `invar` is to be cast.
Returns
-------
outvar
`type 'totype'` -- Typecast version of `invar`
Raises
------
~exceptions.TypeError
If result of typecast is not of type `totype` |
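A short usage sketch of safe_cast as documented above.
print(safe_cast("42", int))    # 42
print(safe_cast(3.9, int))     # 3
try:
    safe_cast("abc", int)      # int("abc") raises ValueError before the isinstance check
except ValueError as err:
    print("cast failed:", err)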
def interpret_element(element_type: str, text: str, span: str) -> Element:
"""
Construct an Element instance from regexp
groups.
"""
return Element(element_type,
interpret_span(span),
text) | Construct an Element instance from regexp
groups. |
def clause_indices(self):
"""The list of clause indices in ``words`` layer.
The indices are unique only in the boundary of a single sentence.
"""
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_IDX, None) for word in self[WORDS]] | The list of clause indices in ``words`` layer.
The indices are unique only in the boundary of a single sentence. |
def scrape_links(self, text, context=False):
'''Convenience function for scraping from a text string.'''
return self.iter_processed_links(io.StringIO(text), context=context) | Convenience function for scraping from a text string. |
def mpim_open(self, *, users: List[str], **kwargs) -> SlackResponse:
"""This method opens a multiparty direct message.
Args:
users (list): A lists of user ids. The ordering of the users
is preserved whenever a MPIM group is returned.
e.g. ['W1234567890', 'U2345678901', 'U3456789012']
"""
kwargs.update({"users": users})
return self.api_call("mpim.open", json=kwargs) | This method opens a multiparty direct message.
Args:
users (list): A lists of user ids. The ordering of the users
is preserved whenever a MPIM group is returned.
e.g. ['W1234567890', 'U2345678901', 'U3456789012'] |
def intervals(graph):
"""
Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval)
"""
interval_graph = Graph() # graph of intervals
heads = [graph.entry] # list of header nodes
interv_heads = {} # interv_heads[i] = interval of header i
processed = {i: False for i in graph}
edges = defaultdict(list)
while heads:
head = heads.pop(0)
if not processed[head]:
processed[head] = True
interv_heads[head] = Interval(head)
# Check if there is a node which has all its predecessor in the
# current interval. If there is, add that node to the interval and
# repeat until all the possible nodes have been added.
change = True
while change:
change = False
for node in graph.rpo[1:]:
if all(
p in interv_heads[head] for p in graph.all_preds(node)):
change |= interv_heads[head].add_node(node)
# At this stage, a node which is not in the interval, but has one
# of its predecessor in it, is the header of another interval. So
# we add all such nodes to the header list.
for node in graph:
if node not in interv_heads[head] and node not in heads:
if any(
p in interv_heads[head] for p in graph.all_preds(node)):
edges[interv_heads[head]].append(node)
assert (node not in heads)
heads.append(node)
interval_graph.add_node(interv_heads[head])
interv_heads[head].compute_end(graph)
# Edges is a mapping of 'Interval -> [header nodes of interval successors]'
for interval, heads in edges.items():
for head in heads:
interval_graph.add_edge(interval, interv_heads[head])
interval_graph.entry = graph.entry.interval
if graph.exit:
interval_graph.exit = graph.exit.interval
return interval_graph, interv_heads | Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval) |
def _indexed_ifilter(self, recursive=True, matches=None, flags=FLAGS,
forcetype=None):
"""Iterate over nodes and their corresponding indices in the node list.
The arguments are interpreted as for :meth:`ifilter`. For each tuple
``(i, node)`` yielded by this method, ``self.index(node) == i``. Note
that if *recursive* is ``True``, ``self.nodes[i]`` might not be the
node itself, but will still contain it.
"""
match = self._build_matcher(matches, flags)
if recursive:
restrict = forcetype if recursive == self.RECURSE_OTHERS else None
def getter(i, node):
for ch in self._get_children(node, restrict=restrict):
yield (i, ch)
inodes = chain(*(getter(i, n) for i, n in enumerate(self.nodes)))
else:
inodes = enumerate(self.nodes)
for i, node in inodes:
if (not forcetype or isinstance(node, forcetype)) and match(node):
yield (i, node) | Iterate over nodes and their corresponding indices in the node list.
The arguments are interpreted as for :meth:`ifilter`. For each tuple
``(i, node)`` yielded by this method, ``self.index(node) == i``. Note
that if *recursive* is ``True``, ``self.nodes[i]`` might not be the
node itself, but will still contain it. |
def p_continue_statement_2(self, p):
"""continue_statement : CONTINUE identifier SEMI
| CONTINUE identifier AUTOSEMI
"""
p[0] = self.asttypes.Continue(p[2])
p[0].setpos(p) | continue_statement : CONTINUE identifier SEMI
| CONTINUE identifier AUTOSEMI |
def print_number_str(self, value, justify_right=True):
"""Print a 4 character long string of numeric values to the display. This
function is similar to print_str but will interpret periods not as
characters but as decimal points associated with the previous character.
"""
# Calculate length of value without decimals.
length = len(value.translate(None, '.'))
# Error if value without decimals is longer than 4 characters.
if length > 4:
self.print_str('----')
return
# Calculate starting position of digits based on justification.
pos = (4-length) if justify_right else 0
# Go through each character and print it on the display.
for i, ch in enumerate(value):
if ch == '.':
# Print decimal points on the previous digit.
self.set_decimal(pos-1, True)
else:
self.set_digit(pos, ch)
pos += 1 | Print a 4 character long string of numeric values to the display. This
function is similar to print_str but will interpret periods not as
characters but as decimal points associated with the previous character. |
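A standalone sketch of the digit/decimal-point bookkeeping described above, independent of any display hardware; the position and flag layout are illustrative.
def layout_number(value, justify_right=True):
    length = len(value.replace('.', ''))
    if length > 4:
        return '----'
    pos = (4 - length) if justify_right else 0
    digits, decimals = [' '] * 4, [False] * 4
    for ch in value:
        if ch == '.':
            decimals[pos - 1] = True   # attach the point to the previous digit
        else:
            digits[pos] = ch
            pos += 1
    return digits, decimals

print(layout_number("12.5"))   # ([' ', '1', '2', '5'], [False, False, True, False])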
def _set_mct_l2ys_state(self, v, load=False):
"""
Setter method for mct_l2ys_state, mapped from YANG variable /mct_l2ys_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mct_l2ys_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mct_l2ys_state() directly.
YANG Description: MCT L2sys Operational Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mct_l2ys_state.mct_l2ys_state, is_container='container', presence=False, yang_name="mct-l2ys-state", rest_name="mct-l2ys-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'l2sys-mct-l2ys', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-l2sys-operational', defining_module='brocade-l2sys-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mct_l2ys_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mct_l2ys_state.mct_l2ys_state, is_container='container', presence=False, yang_name="mct-l2ys-state", rest_name="mct-l2ys-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'l2sys-mct-l2ys', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-l2sys-operational', defining_module='brocade-l2sys-operational', yang_type='container', is_config=True)""",
})
self.__mct_l2ys_state = t
if hasattr(self, '_set'):
self._set() | Setter method for mct_l2ys_state, mapped from YANG variable /mct_l2ys_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mct_l2ys_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mct_l2ys_state() directly.
YANG Description: MCT L2sys Operational Information |
def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data | The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed. |
def setup_client(self, client_id=None, user_data=None, scan=True, broadcast=False):
"""Setup a newly connected client.
``client_id`` must be unique among all connected clients. If it is
passed as None, a random client_id will be generated as a string and
returned.
This method reserves internal resources for tracking what devices this
client has connected to and installs a monitor into the adapter on
behalf of the client.
It should be called whenever a new client connects to the device server
before any other activities by that client are allowed. By default,
all clients start receiving ``device_seen`` events but if you want
your client to also receive broadcast events, you can pass broadcast=True.
Args:
client_id (str): A unique identifier for this client that will be
used to refer to it in all future interactions. If this is
None, then a random string will be generated for the client_id.
user_data (object): An arbitrary object that you would like to store
with this client and will be passed to your event handler when
events are forwarded to this client.
scan (bool): Whether to install a monitor to listen for device_found
events.
broadcast (bool): Whether to install a monitor to listen for broadcast
events.
Returns:
str: The client_id.
If a client id was passed in, it will be the same as what was passed
in. If no client id was passed in then it will be a random unique
string.
"""
if client_id is None:
client_id = str(uuid.uuid4())
if client_id in self._clients:
raise ArgumentError("Duplicate client_id: {}".format(client_id))
async def _client_callback(conn_string, _, event_name, event):
event_tuple = (conn_string, event_name, event)
await self._forward_client_event(client_id, event_tuple)
client_monitor = self.adapter.register_monitor([], [], _client_callback)
self._clients[client_id] = dict(user_data=user_data, connections={},
monitor=client_monitor)
self._adjust_global_events(client_id, scan, broadcast)
return client_id | Setup a newly connected client.
``client_id`` must be unique among all connected clients. If it is
passed as None, a random client_id will be generated as a string and
returned.
This method reserves internal resources for tracking what devices this
client has connected to and installs a monitor into the adapter on
behalf of the client.
It should be called whenever a new client connects to the device server
before any other activities by that client are allowed. By default,
all clients start receiving ``device_seen`` events but if you want
your client to also receive broadcast events, you can pass broadcast=True.
Args:
client_id (str): A unique identifier for this client that will be
used to refer to it in all future interactions. If this is
None, then a random string will be generated for the client_id.
user_data (object): An arbitrary object that you would like to store
with this client and will be passed to your event handler when
events are forwarded to this client.
scan (bool): Whether to install a monitor to listen for device_found
events.
broadcast (bool): Whether to install a monitor to listen for broadcast
events.
Returns:
str: The client_id.
If a client id was passed in, it will be the same as what was passed
in. If no client id was passed in then it will be a random unique
string. |
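A hedged usage sketch, assuming ``server`` is an instance of the device server class that defines ``setup_client`` and that it is used alongside a running event loop::

    client_id = server.setup_client(user_data={'name': 'dashboard'}, scan=True, broadcast=True)
    print('registered client:', client_id)   # a random UUID string when no client_id is passed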
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
) | Return the set of keys which have changed. |
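Because ``_compare_rows`` only needs two mappings of key -> record dict, a self-contained example is easy to run (the record shapes are made up for illustration)::

    from_recs = {1: {'name': 'a', 'qty': 3}, 2: {'name': 'b', 'qty': 5}}
    to_recs = {1: {'name': 'a', 'qty': 3}, 2: {'name': 'b', 'qty': 7}}
    print(_compare_rows(from_recs, to_recs, keys=[1, 2]))   # {2}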
def build_url_request(self):
"""
Consults the authenticator and grant for HTTP request parameters and
headers to send with the access token request, builds the request using
the stored endpoint and returns it.
"""
params = {}
headers = {}
self._authenticator(params, headers)
self._grant(params)
return Request(self._endpoint, urlencode(params), headers) | Consults the authenticator and grant for HTTP request parameters and
headers to send with the access token request, builds the request using
the stored endpoint and returns it. |
def from_iterable(cls, frames, sort=False):
"""
Build a :class:`FrameSet` from an iterable of frames.
Args:
frames (collections.Iterable): an iterable object containing frames as integers
sort (bool): True to sort frames before creation, default is False
Returns:
:class:`FrameSet`:
"""
return FrameSet(sorted(frames) if sort else frames) | Build a :class:`FrameSet` from an iterable of frames.
Args:
frames (collections.Iterable): an iterable object containing frames as integers
sort (bool): True to sort frames before creation, default is False
Returns:
:class:`FrameSet`: |
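Assuming ``FrameSet`` accepts an iterable of integer frames, as the docstring states, usage looks like::

    frames = [10, 1, 5, 3]
    fs_sorted = FrameSet.from_iterable(frames, sort=True)   # frames ordered before construction
    fs_as_is = FrameSet.from_iterable(frames)               # original order preserved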
def trace_dispatch(self, frame, event, arg):
"""allow to switch to Pdb instance"""
if hasattr(self, 'pdb'):
return self.pdb.trace_dispatch(frame, event, arg)
else:
return Pdb.trace_dispatch(self, frame, event, arg) | allow to switch to Pdb instance |
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed):
"""
Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message)
"""
return self.send.host_add(f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
f_asset_group, f_confirmed) | Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message) |
def save(self, filename, ftype='HDF5'): # pragma: no coverage
"""
Save all the model parameters into a file (HDF5 by default).
This is not supported yet. We are working on having a consistent,
human readable way of saving and loading GPy models. This only
saves the parameter array to an HDF5 file. In order
to load the model again, use the same script for building the model
you used to build this model. Then load the param array from this hdf5
file and set the parameters of the created model:
>>> m[:] = h5_file['param_array']
This is less than optimal; we are working on a better solution to that.
"""
from ..param import Param
def gather_params(self, plist):
if isinstance(self,Param):
plist.append(self)
plist = []
self.traverse(gather_params, plist)
names = self.parameter_names(adjust_for_printing=True)
if ftype=='HDF5':
try:
import h5py
f = h5py.File(filename,'w')
for p,n in zip(plist,names):
n = n.replace('.','_')
p = p.values
d = f.create_dataset(n,p.shape,dtype=p.dtype)
d[:] = p
if hasattr(self, 'param_array'):
d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)
d[:] = self.param_array
f.close()
except Exception as e:
raise IOError('Failed to write the parameters into an HDF5 file: {}'.format(e)) | Save all the model parameters into a file (HDF5 by default).
This is not supported yet. We are working on having a consistent,
human readable way of saving and loading GPy models. This only
saves the parameter array to an HDF5 file. In order
to load the model again, use the same script for building the model
you used to build this model. Then load the param array from this hdf5
file and set the parameters of the created model:
>>> m[:] = h5_file['param_array']
This is less than optimal; we are working on a better solution to that.
def create_mbed_detector(**kwargs):
"""! Factory used to create host OS specific mbed-lstools object
:param kwargs: keyword arguments to pass along to the constructors
@return Returns MbedLsTools object or None if host OS is not supported
"""
host_os = platform.system()
if host_os == "Windows":
from .windows import StlinkDetectWindows
return StlinkDetectWindows(**kwargs)
elif host_os == "Linux":
from .linux import StlinkDetectLinuxGeneric
return StlinkDetectLinuxGeneric(**kwargs)
elif host_os == "Darwin":
from .darwin import StlinkDetectDarwin
return StlinkDetectDarwin(**kwargs)
else:
return None | ! Factory used to create host OS specific mbed-lstools object
:param kwargs: keyword arguments to pass along to the constructors
@return Returns MbedLsTools object or None if host OS is not supported |
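A hedged usage sketch; the concrete detector class is platform specific, so the only safe cross-platform check is for None::

    detector = create_mbed_detector()
    if detector is None:
        print('host OS not supported')
    else:
        print('using detector:', type(detector).__name__)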
def get_sorted_source_files(
self,
source_filenames_or_globs: Union[str, List[str]],
recursive: bool = True) -> List[str]:
"""
Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process
"""
if isinstance(source_filenames_or_globs, str):
source_filenames_or_globs = [source_filenames_or_globs]
final_filenames = [] # type: List[str]
for sfg in source_filenames_or_globs:
sfg_expanded = expanduser(sfg)
log.debug("Looking for: {!r}", sfg_expanded)
for filename in glob.glob(sfg_expanded, recursive=recursive):
log.debug("Trying: {!r}", filename)
if self.should_exclude(filename):
log.info("Skipping file {!r}", filename)
continue
final_filenames.append(filename)
final_filenames.sort()
return final_filenames | Returns a sorted list of filenames to process, from a filename,
a glob string, or a list of filenames/globs.
Args:
source_filenames_or_globs: filename/glob, or list of them
recursive: use :func:`glob.glob` in recursive mode?
Returns:
sorted list of files to process |
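A usage sketch, assuming ``processor`` is an instance of the class that defines ``get_sorted_source_files`` and ``should_exclude``; the globs are illustrative::

    files = processor.get_sorted_source_files(['~/project/**/*.py', 'README.rst'], recursive=True)
    for filename in files:
        print(filename)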
def create_host(self, host_id, name, ipaddr, rack_id = None):
"""
Create a host.
@param host_id: The host id.
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None.
@return: An ApiHost object
"""
return hosts.create_host(self, host_id, name, ipaddr, rack_id) | Create a host.
@param host_id: The host id.
@param name: Host name
@param ipaddr: IP address
@param rack_id: Rack id. Default None.
@return: An ApiHost object |
def _encode_dict_as_string(value):
"""Takes the PLIST string of a dict, and returns the same string
encoded such that it can be included in the string representation
of a GSNode."""
# Strip the first and last newlines
if value.startswith("{\n"):
value = "{" + value[2:]
if value.endswith("\n}"):
value = value[:-2] + "}"
# escape double quotes and newlines
return value.replace('"', '\\"').replace("\\n", "\\\\n").replace("\n", "\\n") | Takes the PLIST string of a dict, and returns the same string
encoded such that it can be included in the string representation
of a GSNode. |
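The transformation is pure string manipulation, so a self-contained illustration only needs a sample PLIST-style dict string (the sample content is made up)::

    value = '{\nname = "curve";\n}'
    print(_encode_dict_as_string(value))
    # prints: {name = \"curve\";}   (outer newlines stripped, inner quotes escaped)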
def get_extended_summaryf(self, *args, **kwargs):
"""Extract the extended summary from a function docstring
This function can be used as a decorator to extract the extended
summary of a function docstring (similar to :meth:`get_sectionsf`).
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_extended_summary` method. Note, that the first
argument will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its summary
via the :meth:`get_extended_summary` method"""
def func(f):
doc = f.__doc__
self.get_extended_summary(doc or '', *args, **kwargs)
return f
return func | Extract the extended summary from a function docstring
This function can be used as a decorator to extract the extended
summary of a function docstring (similar to :meth:`get_sectionsf`).
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_extended_summary` method. Note, that the first
argument will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its summary
via the :meth:`get_extended_summary` method |
def _get_pieces(tiles, ports, players_opts, pieces_opts):
"""
Generate a dictionary of pieces using the given options.
pieces options supported:
- Opt.empty -> no locations have pieces
- Opt.random -> not yet implemented (logs a warning)
- Opt.preset -> robber is placed on the first desert found
- Opt.debug -> a variety of pieces are placed around the board
:param tiles: list of tiles from _generate_tiles
:param ports: list of ports from _generate_ports
:param players_opts: Opt
:param pieces_opts: Opt
:return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece
"""
if pieces_opts == Opt.empty:
return dict()
elif pieces_opts == Opt.debug:
players = catan.game.Game.get_debug_players()
return {
(hexgrid.NODE, 0x23): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[0]),
(hexgrid.EDGE, 0x22): catan.pieces.Piece(catan.pieces.PieceType.road, players[0]),
(hexgrid.NODE, 0x67): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[1]),
(hexgrid.EDGE, 0x98): catan.pieces.Piece(catan.pieces.PieceType.road, players[1]),
(hexgrid.NODE, 0x87): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[2]),
(hexgrid.EDGE, 0x89): catan.pieces.Piece(catan.pieces.PieceType.road, players[2]),
(hexgrid.EDGE, 0xA9): catan.pieces.Piece(catan.pieces.PieceType.road, players[3]),
(hexgrid.TILE, 0x77): catan.pieces.Piece(catan.pieces.PieceType.robber, None),
}
elif pieces_opts in (Opt.preset, ):
deserts = filter(lambda tile: tile.terrain == catan.board.Terrain.desert, tiles)
coord = hexgrid.tile_id_to_coord(list(deserts)[0].tile_id)
return {
(hexgrid.TILE, coord): catan.pieces.Piece(catan.pieces.PieceType.robber, None)
}
elif pieces_opts in (Opt.random, ):
logging.warning('{} option not yet implemented'.format(pieces_opts)) | Generate a dictionary of pieces using the given options.
pieces options supported:
- Opt.empty -> no locations have pieces
- Opt.random -> not yet implemented (logs a warning)
- Opt.preset -> robber is placed on the first desert found
- Opt.debug -> a variety of pieces are placed around the board
:param tiles: list of tiles from _generate_tiles
:param ports: list of ports from _generate_ports
:param players_opts: Opt
:param pieces_opts: Opt
:return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece |
def pull_alignments_from(self, reads_to_use, shallow=False):
"""
Pull out alignments of certain reads
:param reads_to_use: numpy array of dtype=bool specifying which reads to use
:param shallow: whether to copy sparse 3D matrix only or not
:return: a new AlignmentPropertyMatrix object containing only the alignments of the specified reads
"""
new_alnmat = self.copy(shallow=shallow)
for hid in xrange(self.num_haplotypes):
hdata = new_alnmat.data[hid]
hdata.data *= reads_to_use[hdata.indices]
hdata.eliminate_zeros()
if new_alnmat.count is not None:
new_alnmat.count[np.logical_not(reads_to_use)] = 0
return new_alnmat | Pull out alignments of certain reads
:param reads_to_use: numpy array of dtype=bool specifying which reads to use
:param shallow: whether to copy sparse 3D matrix only or not
:return: a new AlignmentPropertyMatrix object containing only the alignments of the specified reads
def crop_to_seg_extents(img, seg, padding):
"""Crop the image (usually MRI) to fit within the bounding box of a segmentation (or set of seg)"""
beg_coords, end_coords = crop_coords(seg, padding)
img = crop_3dimage(img, beg_coords, end_coords)
seg = crop_3dimage(seg, beg_coords, end_coords)
return img, seg | Crop the image (usually MRI) to fit within the bounding box of a segmentation (or set of seg) |
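A usage sketch with synthetic numpy volumes; ``crop_coords`` and ``crop_3dimage`` are helpers assumed to live in the same module as ``crop_to_seg_extents``::

    import numpy as np
    img = np.random.rand(64, 64, 64)
    seg = np.zeros((64, 64, 64), dtype=int)
    seg[20:40, 25:45, 30:50] = 1   # a small labelled block
    img_c, seg_c = crop_to_seg_extents(img, seg, padding=5)
    print(img_c.shape, seg_c.shape)   # both cropped to the padded bounding box of the label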
def update_item(self, item, expected_value=None, return_values=None):
"""
Commit pending item updates to Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to update in Amazon DynamoDB. It is expected
that you would have called the add_attribute, put_attribute
and/or delete_attribute methods on this Item prior to calling
this method. Those queued changes are what will be updated.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you
expect. This dictionary should have name/value pairs where the
name is the name of the attribute and the value is either the
value you are expecting or False if you expect the attribute
not to exist.
:type return_values: str
:param return_values: Controls the return of attribute name/value pairs
before they were updated. Possible values are: None, 'ALL_OLD',
'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
specified and the item is overwritten, the content of the old item
is returned. If 'ALL_NEW' is specified, then all the attributes of
the new version of the item are returned. If 'UPDATED_NEW' is
specified, the new versions of only the updated attributes are
returned.
"""
expected_value = self.dynamize_expected_value(expected_value)
key = self.build_key_from_values(item.table.schema,
item.hash_key, item.range_key)
attr_updates = self.dynamize_attribute_updates(item._updates)
response = self.layer1.update_item(item.table.name, key,
attr_updates,
expected_value, return_values,
object_hook=item_object_hook)
item._updates.clear()
if 'ConsumedCapacityUnits' in response:
item.consumed_units = response['ConsumedCapacityUnits']
return response | Commit pending item updates to Amazon DynamoDB.
:type item: :class:`boto.dynamodb.item.Item`
:param item: The Item to update in Amazon DynamoDB. It is expected
that you would have called the add_attribute, put_attribute
and/or delete_attribute methods on this Item prior to calling
this method. Those queued changes are what will be updated.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that you
expect. This dictionary should have name/value pairs where the
name is the name of the attribute and the value is either the
value you are expecting or False if you expect the attribute
not to exist.
:type return_values: str
:param return_values: Controls the return of attribute name/value pairs
before they were updated. Possible values are: None, 'ALL_OLD',
'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
specified and the item is overwritten, the content of the old item
is returned. If 'ALL_NEW' is specified, then all the attributes of
the new version of the item are returned. If 'UPDATED_NEW' is
specified, the new versions of only the updated attributes are
returned. |
def cmp(self,range2,overlap_size=0):
"""the comparitor for ranges
* return 1 if greater than range2
* return -1 if less than range2
* return 0 if overlapped
:param range2:
:param overlap_size: allow some padding for an 'equal' comparison (default 0)
:type range2: GenomicRange
:type overlap_size: int
"""
if self.overlaps(range2,padding=overlap_size): return 0
if self.chr < range2.chr: return -1
elif self.chr > range2.chr: return 1
elif self.end < range2.start: return -1
elif self.start > range2.end: return 1
sys.stderr.write("ERROR: cmp function unexpcted state\n")
sys.exit()
return 0 | the comparator for ranges
* return 1 if greater than range2
* return -1 if less than range2
* return 0 if overlapped
:param range2:
:param overlap_size: allow some padding for an 'equal' comparison (default 0)
:type range2: GenomicRange
:type overlap_size: int |
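A hedged sketch of the three outcomes. The ``GenomicRange`` constructor is not shown above, so the chromosome/start/end signature used here is hypothetical::

    a = GenomicRange('chr1', 100, 200)
    b = GenomicRange('chr1', 150, 250)
    c = GenomicRange('chr2', 10, 20)
    a.cmp(b)   # 0, the ranges overlap
    a.cmp(c)   # -1, chr1 sorts before chr2
    c.cmp(a)   # 1, chr2 sorts after chr1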
def version(self):
"""Return the build date of the gentoo container."""
try:
_version = (curl[Gentoo._LATEST_TXT] | \
awk['NR==2{print}'] | \
cut["-f2", "-d="])().strip()
_version = datetime.utcfromtimestamp(int(_version))\
.strftime("%Y-%m-%d")
except ProcessExecutionError as proc_ex:
_version = "unknown"
LOG.error("Could not determine timestamp: %s",
str(proc_ex))
return _version | Return the build date of the gentoo container. |
def create(self):
"""Create the write buffer and cache directory."""
if not self._sync and not hasattr(self, '_buffer'):
self._buffer = {}
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir) | Create the write buffer and cache directory. |
def add(self, interval, offset):
"""
The added interval must overlap or lie beyond the last stored interval, i.e. it must be added in sorted order.
:param interval: interval to add
:param offset: full virtual offset to add
:return:
"""
start, stop = self.get_start_stop(interval)
if len(self.starts) > 0:
if start < self.starts[-1] or offset <= self.offsets[-1][1]:
raise ValueError('intervals and offsets must be added in-order')
self.offsets[-1][1] = offset
self.offsets[-1][2] += 1
else:
self.starts.append(start)
self.stops.append(stop)
self.offsets.append([offset, offset, 1]) | The added interval must overlap or lie beyond the last stored interval, i.e. it must be added in sorted order.
:param interval: interval to add
:param offset: full virtual offset to add
:return: |
def make_request(self, method, path, params={}, body="", username=None,
password=None, base_uri=None, content_type=None):
headers = {
'User-Agent': CreateSend.user_agent,
'Content-Type': 'application/json; charset=utf-8',
'Accept-Encoding': 'gzip, deflate'}
if content_type:
headers['Content-Type'] = content_type
parsed_base_uri = urlparse(
CreateSend.base_uri if not base_uri else base_uri)
"""username and password should only be set when it is intended that
the default basic authentication mechanism using the API key be
overridden (e.g. when using the apikey route with username and password)."""
if username and password:
headers['Authorization'] = "Basic %s" % base64.b64encode(
("%s:%s" % (username, password)).encode()).decode()
elif self.auth_details:
if 'api_key' in self.auth_details and self.auth_details['api_key']:
headers['Authorization'] = "Basic %s" % base64.b64encode(
("%s:x" % self.auth_details['api_key']).encode()).decode()
elif 'access_token' in self.auth_details and self.auth_details['access_token']:
headers['Authorization'] = "Bearer %s" % self.auth_details[
'access_token']
self.headers = headers
"""If in fake web mode (i.e. self.stub_request has been called),
self.faker should be set, and this request should be treated as a fake."""
if self.fake_web:
# Check that the actual url which would be requested matches
# self.faker.url.
actual_url = "https://%s%s" % (parsed_base_uri.netloc,
self.build_url(parsed_base_uri, path, params))
self.faker.actual_url = actual_url
def same_urls(url_a, url_b):
a = urlparse(url_a)
b = urlparse(url_b)
return (a.scheme == b.scheme and
a.netloc == b.netloc and
a.path == b.path and
a.params == b.params and
parse_qs(a.query) == parse_qs(b.query) and
a.fragment == b.fragment
)
if not same_urls(self.faker.url, actual_url):
raise Exception("Faker's expected URL (%s) doesn't match actual URL (%s)" % (
self.faker.url, actual_url))
self.faker.actual_body = body
def same_bodies(body_a, body_b):
return json.loads(body_a) == json.loads(body_b)
if self.faker.body is not None:
if not same_bodies(self.faker.body, body):
raise Exception("Faker's expected body (%s) doesn't match actual body (%s)" % (
self.faker.body, body))
data = self.faker.open() if self.faker else ''
status = self.faker.status if (
self.faker and self.faker.status) else 200
return self.handle_response(status, data)
c = VerifiedHTTPSConnection(parsed_base_uri.netloc, timeout=self.timeout)
c.request(method, self.build_url(
parsed_base_uri, path, params), body, headers)
response = c.getresponse()
if response.getheader('content-encoding', '') == 'gzip':
data = gzip.GzipFile(fileobj=BytesIO(response.read())).read()
else:
data = response.read()
c.close()
return self.handle_response(response.status, data) | username and password should only be set when it is intended that
the default basic authentication mechanism using the API key be
overridden (e.g. when using the apikey route with username and password). |
def derivative(self, point=None):
"""Return the derivative operator.
The partial derivative is usually linear, but in case the 'constant'
``pad_mode`` is used with nonzero ``pad_const``, the
derivative is given by the derivative with 0 ``pad_const``.
Parameters
----------
point : `domain` `element-like`, optional
The point to take the derivative in. Does not change the result
since the operator is affine.
"""
if self.pad_mode == 'constant' and self.pad_const != 0:
return PartialDerivative(self.domain, self.axis, self.range,
self.method, self.pad_mode, 0)
else:
return self | Return the derivative operator.
The partial derivative is usually linear, but in case the 'constant'
``pad_mode`` is used with nonzero ``pad_const``, the
derivative is given by the derivative with 0 ``pad_const``.
Parameters
----------
point : `domain` `element-like`, optional
The point to take the derivative in. Does not change the result
since the operator is affine. |
def cache(opts, serial):
'''
Returns the returner modules
'''
return LazyLoader(
_module_dirs(opts, 'cache', 'cache'),
opts,
tag='cache',
pack={'__opts__': opts, '__context__': {'serial': serial}},
) | Returns the returner modules |
def generate(self, api):
# type: (Api) -> None
"""
Generates a module for each namespace.
Each namespace will have Python classes to represent data types and
routes in the Stone spec.
"""
for namespace in api.namespaces.values():
with self.output_to_relative_path('{}.pyi'.format(fmt_namespace(namespace.name))):
self._generate_base_namespace_module(namespace) | Generates a module for each namespace.
Each namespace will have Python classes to represent data types and
routes in the Stone spec. |
def clr(args):
"""
%prog clr [bamfile|bedpefile] ref.fasta
Use mates from BEDPE to extract ranges where the ref is covered by mates.
This is useful in detection of chimeric contigs.
"""
p = OptionParser(clr.__doc__)
p.set_bedpe()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedpe, ref = args
if bedpe.endswith(".bam"):
bedpefile = bedpe.replace(".bam", ".bedpe")
if need_update(bedpe, bedpefile):
cmd = "bamToBed -bedpe -i {0}".format(bedpe)
sh(cmd, outfile=bedpefile)
bedpe = bedpefile
filtered = bedpe + ".filtered"
if need_update(bedpe, filtered):
filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
minlen=opts.minlen, maxlen=opts.maxlen)
rmdup = filtered + ".sorted.rmdup"
if need_update(filtered, rmdup):
rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)
converted = rmdup + ".converted"
if need_update(rmdup, converted):
fp = open(rmdup)
fw = open(converted, "w")
for row in fp:
r = BedpeLine(row)
print(r.bedline, file=fw)
fw.close()
merged = converted + ".merge.bed"
if need_update(converted, merged):
mergeBed(converted) | %prog clr [bamfile|bedpefile] ref.fasta
Use mates from BEDPE to extract ranges where the ref is covered by mates.
This is useful in detection of chimeric contigs. |
def get_all_resources(datasets):
# type: (List['Dataset']) -> List[hdx.data.resource.Resource]
"""Get all resources from a list of datasets (such as returned by search)
Args:
datasets (List[Dataset]): list of datasets
Returns:
List[hdx.data.resource.Resource]: list of resources within those datasets
"""
resources = []
for dataset in datasets:
for resource in dataset.get_resources():
resources.append(resource)
return resources | Get all resources from a list of datasets (such as returned by search)
Args:
datasets (List[Dataset]): list of datasets
Returns:
List[hdx.data.resource.Resource]: list of resources within those datasets |
def DeleteResource(self, path, type, id, initial_headers, options=None):
"""Deletes a Azure Cosmos resource and returns it.
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The deleted Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'delete',
path,
id,
type,
options)
# Delete will use WriteEndpoint since it uses DELETE operation
request = request_object._RequestObject(type, documents._OperationType.Delete)
result, self.last_response_headers = self.__Delete(path,
request,
headers)
# update session for request mutates data on server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result | Deletes an Azure Cosmos resource and returns it.
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The deleted Azure Cosmos resource.
:rtype:
dict |
def update_existing_peers( self, num_to_remove, peer_table=None, con=None, path=None ):
"""
Update the set of existing peers:
* revalidate the existing but old peers
* remove at most $num_to_remove unhealthy peers
Return the number of peers removed
"""
if path is None:
path = self.atlasdb_path
# remove peers that are too old
if self.last_clean_time + atlas_peer_clean_interval() < time_now():
# remove stale peers
log.debug("%s: revalidate old peers" % self.my_hostport)
atlas_revalidate_peers( con=con, path=path, peer_table=peer_table )
self.last_clean_time = time_now()
removed = self.remove_unhealthy_peers( num_to_remove, con=con, path=path, peer_table=peer_table )
# if they're also in the new set, remove them there too
for peer in removed:
if peer in self.new_peers:
self.new_peers.remove(peer)
return len(removed) | Update the set of existing peers:
* revalidate the existing but old peers
* remove at most $num_to_remove unhealthy peers
Return the number of peers removed |
def _does_not_contain_replica_sections(sysmeta_pyxb):
"""Assert that ``sysmeta_pyxb`` does not contain any replica information."""
if len(getattr(sysmeta_pyxb, 'replica', [])):
raise d1_common.types.exceptions.InvalidSystemMetadata(
0,
'A replica section was included. A new object created via '
'create() or update() cannot already have replicas. pid="{}"'.format(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
),
identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
) | Assert that ``sysmeta_pyxb`` does not contain any replica information. |
def generate_key(filepath):
''' generates a new, random secret key at the given location on the
filesystem and returns its path
'''
fs = path.abspath(path.expanduser(filepath))
with open(fs, 'wb') as outfile:
outfile.write(Fernet.generate_key())
chmod(fs, 0o400)
return fs | generates a new, random secret key at the given location on the
filesystem and returns its path |
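A usage sketch; the key path is illustrative, and since the key file is written read-only (mode 0o400) the sketch only reads it back::

    from cryptography.fernet import Fernet
    key_path = generate_key('~/.myapp/secret.key')   # hypothetical location
    with open(key_path, 'rb') as fh:
        fernet = Fernet(fh.read())
    token = fernet.encrypt(b'hello')
    print(fernet.decrypt(token))   # b'hello'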
def build_absolute_uri(request, url):
"""
Allow to override printing url, not necessarily on the same
server instance.
"""
if app_settings.get('CAPTURE_ROOT_URL'):
return urljoin(app_settings.get('CAPTURE_ROOT_URL'), url)
return request.build_absolute_uri(url) | Allow to override printing url, not necessarily on the same
server instance. |
def valid_address(address):
"""
Determines whether the specified address string is valid.
"""
if not address:
return False
components = str(address).split(':')
if len(components) > 2 or not valid_hostname(components[0]):
return False
if len(components) == 2 and not valid_port(components[1]):
return False
return True | Determines whether the specified address string is valid. |
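Typical inputs and expected outcomes, assuming ``valid_hostname`` and ``valid_port`` behave as their names suggest (both are defined elsewhere in the same module)::

    valid_address('example.com')        # True
    valid_address('example.com:8080')   # True
    valid_address('')                   # False, empty string
    valid_address('a:b:c')              # False, too many components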
def _plot_ts_cols(ts):
"""
Get variable + values vs year, age, depth (whichever are available)
:param dict ts: TimeSeries dictionary
:return dict: Key: variableName, Value: Pandas Series object
"""
logger_dataframes.info("enter get_ts_cols()")
d = {}
# Not entirely necessary, but this will make the column headers look nicer for the data frame
# The column header will be in format "variableName (units)"
try:
units = " (" + ts["paleoData_units"] + ")"
except KeyError as e:
units = ""
logger_dataframes.warn("get_ts_cols: KeyError: paleoData_units not found, {}".format(e))
try:
d[ts["paleoData_variableName"] + units] = ts["paleoData_values"]
except KeyError as e:
logger_dataframes.warn("get_ts_cols: KeyError: variableName or values not found, {}".format(e))
# Start looking for age, year, depth columns
for k, v in ts.items():
if re_pandas_x_num.match(k):
try:
units = " (" + ts[k + "Units"] + ")"
d[k + units] = v
except KeyError as e:
logger_dataframes.warn("get_ts_cols: KeyError: Special column units, {}, {}".format(k, e))
logger_dataframes.info("exit get_ts_cols: found {}".format(len(d)))
return d | Get variable + values vs year, age, depth (whichever are available)
:param dict ts: TimeSeries dictionary
:return dict: Key: variableName, Value: Pandas Series object
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
"""Compute loss numerator and denominator for one shard of output."""
del vocab_size # unused arg
logits = top_out
logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:])
targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01)
return common_layers.padded_cross_entropy(
logits,
targets,
model_hparams.label_smoothing,
cutoff=cutoff,
weights_fn=weights_fn) | Compute loss numerator and denominator for one shard of output. |
def _collect_args(args) -> ISeq:
"""Collect Python starred arguments into a Basilisp list."""
if isinstance(args, tuple):
return llist.list(args)
raise TypeError("Python variadic arguments should always be a tuple") | Collect Python starred arguments into a Basilisp list. |
def diff_medians(array_one, array_two):
"""
Computes the difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty.
"""
array_one = check_array(array_one)
array_two = check_array(array_two)
diff_medians = np.ma.median(array_one) - np.ma.median(array_two)
return diff_medians | Computes the difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty. |
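A usage sketch with small numpy arrays; ``check_array`` is assumed to flatten the input and mask non-finite values, as the docstring describes::

    import numpy as np
    a = np.array([1.0, 2.0, 3.0, np.nan])
    b = np.array([10.0, 20.0, 30.0])
    print(diff_medians(a, b))   # median(a ignoring NaN) - median(b) = 2.0 - 20.0 = -18.0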
def get_equalisers(self):
"""Get the equaliser modes supported by this device."""
if not self.__equalisers:
self.__equalisers = yield from self.handle_list(
self.API.get('equalisers'))
return self.__equalisers | Get the equaliser modes supported by this device. |
def getDataFromFIFO(self, bytesToRead):
"""
reads the specified number of bytes from the FIFO, should be called after a call to getFifoCount to ensure there
is new data available (to avoid reading duplicate data).
:param bytesToRead: the number of bytes to read.
:return: the bytes read.
"""
return self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_R_W, bytesToRead) | reads the specified number of bytes from the FIFO, should be called after a call to getFifoCount to ensure there
is new data available (to avoid reading duplicate data).
:param bytesToRead: the number of bytes to read.
:return: the bytes read. |
def delete(self):
"""Delete the persistent identifier.
If the persistent identifier hasn't been registered yet, it is
removed from the database. Otherwise, it's marked as
:attr:`invenio_pidstore.models.PIDStatus.DELETED`.
:returns: `True` if the PID is successfully removed.
"""
removed = False
try:
with db.session.begin_nested():
if self.is_new():
# New persistent identifier which hasn't been registered
# yet.
db.session.delete(self)
removed = True
else:
self.status = PIDStatus.DELETED
db.session.add(self)
except SQLAlchemyError:
logger.exception("Failed to delete PID.", extra=dict(pid=self))
raise
if removed:
logger.info("Deleted PID (removed).", extra=dict(pid=self))
else:
logger.info("Deleted PID.", extra=dict(pid=self))
return True | Delete the persistent identifier.
If the persistent identifier hasn't been registered yet, it is
removed from the database. Otherwise, it's marked as
:attr:`invenio_pidstore.models.PIDStatus.DELETED`.
:returns: `True` if the PID is successfully removed. |
def dim(self):
"""
NAME:
dim
PURPOSE:
return the dimension of the Orbit
INPUT:
(none)
OUTPUT:
dimension
HISTORY:
2011-02-03 - Written - Bovy (NYU)
"""
if len(self._orb.vxvv) == 2:
return 1
elif len(self._orb.vxvv) == 3 or len(self._orb.vxvv) == 4:
return 2
elif len(self._orb.vxvv) == 5 or len(self._orb.vxvv) == 6:
return 3 | NAME:
dim
PURPOSE:
return the dimension of the Orbit
INPUT:
(none)
OUTPUT:
dimension
HISTORY:
2011-02-03 - Written - Bovy (NYU) |
def QA_SU_save_stock_terminated(client=DATABASE):
'''
Fetch the list of stocks that have been delisted. The data comes from the Shanghai Stock Exchange; currently it only covers stocks whose trading was terminated on the Shanghai Stock Exchange.
collection:
code: stock code  name: stock name  oDate: listing date  tDate: delisting date
:param client:
:return: None
'''
# 🛠todo deprecated: should be fetched from Wind data instead
# this function is deprecated
print("!!! tushare: this function is deprecated !!!")
df = QATs.get_terminated()
#df = QATs.get_suspended()
print(
" Get stock terminated from tushare,stock count is %d (终止上市股票列表)" %
len(df)
)
coll = client.stock_terminated
client.drop_collection(coll)
json_data = json.loads(df.reset_index().to_json(orient='records'))
coll.insert(json_data)
print(" 保存终止上市股票列表 到 stock_terminated collection, OK") | 获取已经被终止上市的股票列表,数据从上交所获取,目前只有在上海证券交易所交易被终止的股票。
collection:
code: stock code  name: stock name  oDate: listing date  tDate: delisting date
:param client:
:return: None |
def analyses(self):
"""Retrieve a list of analyzed samples.
:rtype: list
:return: List of objects referencing each analyzed file.
"""
response = self._request("tasks/list")
return json.loads(response.content.decode('utf-8'))['tasks'] | Retrieve a list of analyzed samples.
:rtype: list
:return: List of objects referencing each analyzed file. |
def set_level(self, position, channel=None):
"""Seek a specific value by specifying a float() from 0.0 to 1.0."""
try:
position = float(position)
except Exception as err:
LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
return False
self.writeNodeData("LEVEL", position, channel) | Seek a specific value by specifying a float() from 0.0 to 1.0. |
def set_breaks_and_labels(self, ranges, layout_info, pidx):
"""
Add breaks and labels to the axes
Parameters
----------
ranges : dict-like
range information for the axes
layout_info : dict-like
facet layout information
pidx : int
Panel index
"""
ax = self.axs[pidx]
facet.set_breaks_and_labels(self, ranges, layout_info, pidx)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left') | Add breaks and labels to the axes
Parameters
----------
ranges : dict-like
range information for the axes
layout_info : dict-like
facet layout information
pidx : int
Panel index |
def read_input_registers(slave_id, starting_address, quantity):
""" Return ADU for Modbus function code 04: Read Input Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU.
"""
function = ReadInputRegisters()
function.starting_address = starting_address
function.quantity = quantity
return _create_request_adu(slave_id, function.request_pdu) | Return ADU for Modbus function code 04: Read Input Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU. |
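A usage sketch; only the ADU construction above comes from the source, while the transport the bytes are written to (a serial port or socket opened elsewhere) is left as an assumption::

    adu = read_input_registers(slave_id=1, starting_address=0, quantity=4)
    # adu is ready to be written to the Modbus transport; the response would
    # then be read back and parsed by the corresponding response handler
    print(len(adu))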