code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) |
---|---|
def safe_evaluate(command, glob, local):
"""
Continue to attempt to execute the given command, importing objects which
cause a NameError in the command
:param command: command for eval
:param glob: globals dict for eval
:param local: locals dict for eval
:return: command result
"""
while True:
try:
return eval(command, glob, local)
except NameError as e:
match = re.match("name '(.*)' is not defined", str(e))
if not match:
raise e
try:
exec('import %s' % (match.group(1), ), glob)
except ImportError:
raise e | Continue to attempt to execute the given command, importing objects which
cause a NameError in the command
:param command: command for eval
:param glob: globals dict for eval
:param local: locals dict for eval
:return: command result |
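A minimal usage sketch, assuming the safe_evaluate above (and the re module it relies on) are in scope; the hypothetical expression references math, which gets imported on demand into the supplied globals:
import re  # needed by safe_evaluate to parse the NameError message

result = safe_evaluate("math.sqrt(16)", globals(), {})
print(result)  # 4.0, after 'math' was auto-imported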
def _remove_summary(self):
"""Removed packge size summary
"""
if self.size > 0:
print("\nRemoved summary")
print("=" * 79)
print("{0}Size of removed packages {1} {2}.{3}".format(
self.meta.color["GREY"], round(self.size, 2), self.unit,
self.meta.color["ENDC"])) | Removed packge size summary |
def show_instance(name, call=None):
'''
List a single node, return dict of grains.
'''
local = salt.client.LocalClient()
ret = local.cmd(name, 'grains.items')
ret.update(_build_required_items(ret))
return ret | List a single node, return dict of grains. |
def set_alarm_mode(self, mode):
"""
:param mode: one of [None, "activity", "tamper", "forced_entry"]
:return: nothing
"""
values = {"desired_state": {"alarm_mode": mode}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | :param mode: one of [None, "activity", "tamper", "forced_entry"]
:return: nothing |
def add_service(self, service_type, service_endpoint=None, values=None):
"""
Add a service to the list of services on the DDO.
:param service_type: Service
:param service_endpoint: Service endpoint, str
:param values: Python dict with serviceDefinitionId, templateId, serviceAgreementContract,
list of conditions and consume endpoint.
"""
if isinstance(service_type, Service):
service = service_type
else:
service = Service(service_endpoint, service_type, values, did=self._did)
logger.debug(f'Adding service with service type {service_type} with did {self._did}')
self._services.append(service) | Add a service to the list of services on the DDO.
:param service_type: Service
:param service_endpoint: Service endpoint, str
:param values: Python dict with serviceDefinitionId, templateId, serviceAgreementContract,
list of conditions and consume endpoint. |
def arity_evaluation_checker(function):
"""Build an evaluation checker that will return True when it is
guaranteed that all positional arguments have been accounted for.
"""
is_class = inspect.isclass(function)
if is_class:
function = function.__init__
function_info = inspect.getargspec(function)
function_args = function_info.args
if is_class:
# This is to handle the fact that self will get passed in
# automatically.
function_args = function_args[1:]
def evaluation_checker(*args, **kwargs):
kwarg_keys = set(kwargs.keys())
if function_info.keywords is None:
acceptable_kwargs = function_args[len(args):]
# Make sure that we didn't get an argument we can't handle.
if not kwarg_keys.issubset(acceptable_kwargs):
TypeError("Unrecognized Arguments: {0}".format(
[key for key in kwarg_keys
if key not in acceptable_kwargs]
))
needed_args = function_args[len(args):]
if function_info.defaults:
needed_args = needed_args[:-len(function_info.defaults)]
return not needed_args or kwarg_keys.issuperset(needed_args)
return evaluation_checker | Build an evaluation checker that will return True when it is
guaranteed that all positional arguments have been accounted for. |
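A usage sketch with a hypothetical add function (the checker above relies on inspect.getargspec, so this assumes a Python version that still provides it):
def add(a, b, c=0):
    return a + b + c

checker = arity_evaluation_checker(add)
print(checker(1))        # False: 'b' is still unaccounted for
print(checker(1, 2))     # True: 'c' has a default
print(checker(1, b=2))   # True: 'b' supplied as a keyword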
def cached_property(func):
"""Special property decorator that caches the computed
property value in the object's instance dict the first
time it is accessed.
"""
name = func.__name__
doc = func.__doc__
def getter(self, name=name):
try:
return self.__dict__[name]
except KeyError:
self.__dict__[name] = value = func(self)
return value
getter.func_name = name
return property(getter, doc=doc) | Special property decorator that caches the computed
property value in the object's instance dict the first
time it is accessed. |
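A minimal usage sketch with a hypothetical Circle class; the decorated body runs once, and later accesses are answered from the value cached in the instance dict:
import math

class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    @cached_property
    def area(self):
        print("computing area ...")        # runs only on the first access
        return math.pi * self.radius ** 2

c = Circle(2.0)
print(c.area)   # prints "computing area ..." then 12.566...
print(c.area)   # served from c.__dict__, no recomputation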
def _parse_name(self, team_data):
"""
Parses the team's name.
On the pages being parsed, the team's name doesn't follow the standard
parsing algorithm that we use for the fields, and requires a special
one-off algorithm. The name is attached in the 'title' attribute from
within 'team_ID'. A few simple regex subs captures the team name. The
'_name' attribute is applied with the captured team name from this
function.
Parameters
----------
team_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string.
"""
name = team_data('td[data-stat="team_ID"]:first')
name = re.sub(r'.*title="', '', str(name))
name = re.sub(r'".*', '', name)
setattr(self, '_name', name) | Parses the team's name.
On the pages being parsed, the team's name doesn't follow the standard
parsing algorithm that we use for the fields, and requires a special
one-off algorithm. The name is attached in the 'title' attribute from
within 'team_ID'. A few simple regex subs captures the team name. The
'_name' attribute is applied with the captured team name from this
function.
Parameters
----------
team_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string. |
def deploy_s3app(self):
"""Deploys artifacts contents to S3 bucket"""
utils.banner("Deploying S3 App")
primary_region = self.configs['pipeline']['primary_region']
s3obj = s3.S3Deployment(
app=self.app,
env=self.env,
region=self.region,
prop_path=self.json_path,
artifact_path=self.artifact_path,
artifact_version=self.artifact_version,
primary_region=primary_region)
s3obj.upload_artifacts() | Deploys artifacts contents to S3 bucket |
def render_js_template(self, template_path, element_id, context=None):
"""
Render a js template.
"""
context = context or {}
return u"<script type='text/template' id='{}'>\n{}\n</script>".format(
element_id,
self.render_template(template_path, context)
) | Render a js template. |
def color_pack2rgb(packed):
"""Returns r, g, b tuple from packed wx.ColourGetRGB value"""
r = packed & 255
g = (packed & (255 << 8)) >> 8
b = (packed & (255 << 16)) >> 16
return r, g, b | Returns r, g, b tuple from packed wx.ColourGetRGB value |
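A worked instance of the unpacking above, where the packed value follows the 0x00BBGGRR layout implied by the shifts:
packed = 0x0050A0FF            # blue=0x50, green=0xA0, red=0xFF
print(color_pack2rgb(packed))  # (255, 160, 80)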
def calculate_ecef_velocity(inst):
"""
Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft position in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
"""
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2])/2.
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2])/2.
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2])/2.
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta['velocity_ecef_x'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_y'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_z'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
return | Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft position in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z) |
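The symmetric-difference step in isolation, as a numpy-only sketch with hypothetical positions sampled once per second (the real routine reads these from the pysat Instrument and writes the result back):
import numpy as np

x = np.array([7000.0, 7000.5, 7001.2, 7002.1, 7003.2])  # km
vel_x = (x[2:] - x[0:-2]) / 2.                           # km/s at the interior points
print(vel_x)                                             # [0.6 0.8 1. ]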
def nfa_json_importer(input_file: str) -> dict:
""" Imports a NFA from a JSON file.
:param str input_file: path+filename to JSON file;
:return: *(dict)* representing a NFA.
"""
file = open(input_file)
json_file = json.load(file)
file.close()
transitions = {} # key [state in states, action in alphabet]
# value [Set of arriving states in states]
for p in json_file['transitions']:
transitions.setdefault((p[0], p[1]), set()).add(p[2])
nfa = {
'alphabet': set(json_file['alphabet']),
'states': set(json_file['states']),
'initial_states': set(json_file['initial_states']),
'accepting_states': set(json_file['accepting_states']),
'transitions': transitions
}
return nfa | Imports a NFA from a JSON file.
:param str input_file: path+filename to JSON file;
:return: *(dict)* representing a NFA. |
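A self-contained sketch of the JSON layout the importer expects, using a hypothetical file name and automaton (assumes the importer above and the json module are importable):
import json

example = {
    "alphabet": ["a", "b"],
    "states": ["s0", "s1"],
    "initial_states": ["s0"],
    "accepting_states": ["s1"],
    "transitions": [["s0", "a", "s1"], ["s1", "b", "s0"]],
}
with open("nfa_example.json", "w") as handle:
    json.dump(example, handle)
nfa = nfa_json_importer("nfa_example.json")
print(nfa["transitions"])   # {('s0', 'a'): {'s1'}, ('s1', 'b'): {'s0'}}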
def get_processor_status(self, p, x, y):
"""Get the status of a given core and the application executing on it.
Returns
-------
:py:class:`.ProcessorStatus`
Representation of the current state of the processor.
"""
# Get the VCPU base
address = (self.read_struct_field("sv", "vcpu_base", x, y) +
self.structs[b"vcpu"].size * p)
# Get the VCPU data
data = self.read(address, self.structs[b"vcpu"].size, x, y)
# Build the kwargs that describe the current state
state = {
name.decode('utf-8'): struct.unpack(
f.pack_chars,
data[f.offset:f.offset+struct.calcsize(f.pack_chars)]
)[0] for (name, f) in iteritems(self.structs[b"vcpu"].fields)
}
state["registers"] = [state.pop("r{}".format(i)) for i in range(8)]
state["user_vars"] = [state.pop("user{}".format(i)) for i in range(4)]
state["app_name"] = state["app_name"].strip(b'\x00').decode('utf-8')
state["cpu_state"] = consts.AppState(state["cpu_state"])
state["rt_code"] = consts.RuntimeException(state["rt_code"])
sw_ver = state.pop("sw_ver")
state["version"] = ((sw_ver >> 16) & 0xFF,
(sw_ver >> 8) & 0xFF,
(sw_ver >> 0) & 0xFF)
for newname, oldname in [("iobuf_address", "iobuf"),
("program_state_register", "psr"),
("stack_pointer", "sp"),
("link_register", "lr"), ]:
state[newname] = state.pop(oldname)
state.pop("__PAD")
return ProcessorStatus(**state) | Get the status of a given core and the application executing on it.
Returns
-------
:py:class:`.ProcessorStatus`
Representation of the current state of the processor. |
def get_out_streamids(self):
"""Returns a set of output stream ids registered for this component"""
if self.outputs is None:
return set()
if not isinstance(self.outputs, (list, tuple)):
raise TypeError("Argument to outputs must be either list or tuple, given: %s"
% str(type(self.outputs)))
ret_lst = []
for output in self.outputs:
if not isinstance(output, (str, Stream)):
raise TypeError("Outputs must be a list of strings or Streams, given: %s" % str(output))
ret_lst.append(Stream.DEFAULT_STREAM_ID if isinstance(output, str) else output.stream_id)
return set(ret_lst) | Returns a set of output stream ids registered for this component |
def parse_time_trigger_string(trigger_frequency):
"""
:param trigger_frequency: human-readable and editable string in one of two formats:
- 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
- 'every NNN'
:return: return tuple (parsed_trigger_frequency, timer_klass)
"""
# replace multiple spaces with one
trigger_frequency = ' '.join(trigger_frequency.split())
if trigger_frequency.startswith(TRIGGER_PREAMBLE_AT):
# EventClock block
trigger_frequency = trigger_frequency[len(TRIGGER_PREAMBLE_AT):]
parsed_trigger_frequency = trigger_frequency.replace(' ', '').replace(',', ' ').split(' ')
timer_klass = EventClock
elif trigger_frequency.startswith(TRIGGER_PREAMBLE_EVERY):
# RepeatTimer block
trigger_frequency = trigger_frequency[len(TRIGGER_PREAMBLE_EVERY):]
parsed_trigger_frequency = int(trigger_frequency)
timer_klass = RepeatTimer
else:
raise ValueError('Unknown time trigger format {0}'.format(trigger_frequency))
return parsed_trigger_frequency, timer_klass | :param trigger_frequency: human-readable and editable string in one of two formats:
- 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
- 'every NNN'
:return: return tuple (parsed_trigger_frequency, timer_klass) |
def altitude(SCALED_PRESSURE, ground_pressure=None, ground_temp=None):
'''calculate barometric altitude'''
from . import mavutil
self = mavutil.mavfile_global
if ground_pressure is None:
if self.param('GND_ABS_PRESS', None) is None:
return 0
ground_pressure = self.param('GND_ABS_PRESS', 1)
if ground_temp is None:
ground_temp = self.param('GND_TEMP', 0)
scaling = ground_pressure / (SCALED_PRESSURE.press_abs*100.0)
temp = ground_temp + 273.15
return log(scaling) * temp * 29271.267 * 0.001 | calculate barometric altitude |
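A worked instance of the same formula with hypothetical readings, bypassing the MAVLink parameter lookups (the *100.0 in the function implies press_abs is in hPa while the ground pressure is in Pa):
from math import log

ground_pressure = 101325.0   # Pa
press_abs = 1000.0           # hPa, as in SCALED_PRESSURE.press_abs
ground_temp = 15.0           # degrees C

scaling = ground_pressure / (press_abs * 100.0)
temp = ground_temp + 273.15
print(log(scaling) * temp * 29271.267 * 0.001)   # roughly 111 m above the reference level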
def get_nvr(self, epoch=None):
"""get NVR string from .spec Name, Version, Release and Epoch"""
name = self.get_tag('Name', expand_macros=True)
vr = self.get_vr(epoch=epoch)
return '%s-%s' % (name, vr) | get NVR string from .spec Name, Version, Release and Epoch |
def rowlengths(table):
"""
Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files.
"""
counter = Counter()
for row in data(table):
counter[len(row)] += 1
output = [('length', 'count')]
output.extend(counter.most_common())
return wrap(output) | Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files. |
def cmd_antenna(self, args):
'''set gcs location'''
if len(args) != 2:
if self.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(self.gcs_location))
return
self.gcs_location = (float(args[0]), float(args[1])) | set gcs location |
def bandpass_filter(data, k, w1, w2):
"""
This function will apply a bandpass filter to data. It will be kth
order and will select the band between w1 and w2.
Parameters
----------
data: array, dtype=float
The data you wish to filter
k: number, int
The order of approximation for the filter. A max value for
this is data.size/2
w1: number, float
This is the lower bound for which frequencies will pass
through.
w2: number, float
This is the upper bound for which frequencies will pass
through.
Returns
-------
y: array, dtype=float
The filtered data.
"""
data = np.asarray(data)
low_w = np.pi * 2 / w2
high_w = np.pi * 2 / w1
bweights = np.zeros(2 * k + 1)
bweights[k] = (high_w - low_w) / np.pi
j = np.arange(1, int(k) + 1)
weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j))
bweights[k + j] = weights
bweights[:k] = weights[::-1]
bweights -= bweights.mean()
return fftconvolve(bweights, data, mode='valid') | This function will apply a bandpass filter to data. It will be kth
order and will select the band between w1 and w2.
Parameters
----------
data: array, dtype=float
The data you wish to filter
k: number, int
The order of approximation for the filter. A max value for
this is data.size/2
w1: number, float
This is the lower bound for which frequencies will pass
through.
w2: number, float
This is the upper bound for which frequencies will pass
through.
Returns
-------
y: array, dtype=float
The filtered data. |
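A usage sketch, assuming the module-level names the function relies on (numpy as np, sin from numpy, fftconvolve from scipy.signal); a series mixing a slow and a fast oscillation is filtered so only periods between 20 and 40 samples survive:
import numpy as np
from numpy import sin
from scipy.signal import fftconvolve

t = np.arange(400)
series = np.sin(2 * np.pi * t / 32) + np.sin(2 * np.pi * t / 6)  # period 32 + period 6
cycle = bandpass_filter(series, k=12, w1=20, w2=40)              # keeps the period-32 component
print(cycle.shape)                                               # (376,): mode='valid' trims 2*k points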
def list_modules(desc=False):
'''
List currently installed PSGet Modules on the system.
:param desc: If ``True``, the verbose description will be returned.
:type desc: ``bool``
CLI Example:
.. code-block:: bash
salt 'win01' psget.list_modules
salt 'win01' psget.list_modules desc=True
'''
cmd = 'Get-InstalledModule'
modules = _pshell(cmd)
if isinstance(modules, dict):
ret = []
if desc:
modules_ret = {}
modules_ret[modules['Name']] = copy.deepcopy(modules)
modules = modules_ret
return modules
ret.append(modules['Name'])
return ret
names = []
if desc:
names = {}
for module in modules:
if desc:
names[module['Name']] = module
continue
names.append(module['Name'])
return names | List currently installed PSGet Modules on the system.
:param desc: If ``True``, the verbose description will be returned.
:type desc: ``bool``
CLI Example:
.. code-block:: bash
salt 'win01' psget.list_modules
salt 'win01' psget.list_modules desc=True |
def unsetenv(key):
"""Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
"""
key = path2fsn(key)
if is_win:
# python 3 has no unsetenv under Windows -> use our ctypes one as well
try:
del_windows_env_var(key)
except WindowsError:
pass
else:
os.unsetenv(key) | Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset |
def n_at_a_time(
items: List[int], n: int, fillvalue: str
) -> Iterator[Tuple[Union[int, str]]]:
"""Returns an iterator which groups n items at a time.
Any final partial tuple will be padded with the fillvalue
>>> list(n_at_a_time([1, 2, 3, 4, 5], 2, 'X'))
[(1, 2), (3, 4), (5, 'X')]
"""
it = iter(items)
return itertools.zip_longest(*[it] * n, fillvalue=fillvalue) | Returns an iterator which groups n items at a time.
Any final partial tuple will be padded with the fillvalue
>>> list(n_at_a_time([1, 2, 3, 4, 5], 2, 'X'))
[(1, 2), (3, 4), (5, 'X')] |
def simplex_select_entering_arc(self, t, pivot):
'''
API:
simplex_select_entering_arc(self, t, pivot)
Description:
Decides and returns entering arc using pivot rule.
Input:
t: current spanning tree solution
pivot: May be one of the following; 'first_eligible' or 'dantzig'.
'dantzig' is the default value.
Return:
Returns entering arc tuple (k,l)
'''
if pivot=='dantzig':
# pick the maximum violation
candidate = {}
for e in self.edge_attr:
if e in t.edge_attr:
continue
flow_ij = self.edge_attr[e]['flow']
potential_i = self.get_node(e[0]).get_attr('potential')
potential_j = self.get_node(e[1]).get_attr('potential')
capacity_ij = self.edge_attr[e]['capacity']
c_ij = self.edge_attr[e]['cost']
cpi_ij = c_ij - potential_i + potential_j
if flow_ij==0:
if cpi_ij < 0:
candidate[e] = cpi_ij
elif flow_ij==capacity_ij:
if cpi_ij > 0:
candidate[e] = cpi_ij
for e in candidate:
max_c = e
max_v = abs(candidate[e])
break
for e in candidate:
if max_v < abs(candidate[e]):
max_c = e
max_v = abs(candidate[e])
elif pivot=='first_eligible':
# pick the first eligible
for e in self.edge_attr:
if e in t.edge_attr:
continue
flow_ij = self.edge_attr[e]['flow']
potential_i = self.get_node(e[0]).get_attr('potential')
potential_j = self.get_node(e[1]).get_attr('potential')
capacity_ij = self.edge_attr[e]['capacity']
c_ij = self.edge_attr[e]['cost']
cpi_ij = c_ij - potential_i + potential_j
if flow_ij==0:
if cpi_ij < 0:
max_c = e
max_v = abs(cpi_ij)
elif flow_ij==capacity_ij:
if cpi_ij > 0:
max_c = e
max_v = cpi_ij
else:
raise Exception("Unknown pivot rule.")
return max_c | API:
simplex_select_entering_arc(self, t, pivot)
Description:
Decides and returns entering arc using pivot rule.
Input:
t: current spanning tree solution
pivot: May be one of the following; 'first_eligible' or 'dantzig'.
'dantzig' is the default value.
Return:
Returns entering arc tuple (k,l) |
def add_index(self, mode, blob_id, path):
"""
Add new entry to the current index
:param mode: file mode of the entry
:param blob_id: id of the blob to add
:param path: path of the entry in the index
:return:
"""
self.command_exec(['update-index', '--add', '--cacheinfo', mode, blob_id, path]) | Add new entry to the current index
:param mode: file mode of the entry
:param blob_id: id of the blob to add
:param path: path of the entry in the index
:return: |
def split_qs(string, delimiter='&'):
"""Split a string by the specified unquoted, not enclosed delimiter"""
open_list = '[<{('
close_list = ']>})'
quote_chars = '"\''
level = index = last_index = 0
quoted = False
result = []
for index, letter in enumerate(string):
if letter in quote_chars:
if not quoted:
quoted = True
level += 1
else:
quoted = False
level -= 1
elif letter in open_list:
level += 1
elif letter in close_list:
level -= 1
elif letter == delimiter and level == 0:
# Split here
element = string[last_index: index]
if element:
result.append(element)
last_index = index + 1
if index:
element = string[last_index: index + 1]
if element:
result.append(element)
return result | Split a string by the specified unquoted, not enclosed delimiter |
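A quick usage sketch: the delimiter is ignored inside brackets and quotes.
print(split_qs('a=1&b=[x&y]&c="p&q"'))
# ['a=1', 'b=[x&y]', 'c="p&q"']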
def members(self):
""" -> #set of all members in the set """
if self.serialized:
return set(map(
self._loads, self._client.smembers(self.key_prefix)))
else:
return set(map(
self._decode, self._client.smembers(self.key_prefix))) | -> #set of all members in the set |
def _comm_tensor_data(device_name,
node_name,
maybe_base_expanded_node_name,
output_slot,
debug_op,
tensor_value,
wall_time):
"""Create a dict() as the outgoing data in the tensor data comm route.
Note: The tensor data in the comm route does not include the value of the
tensor in its entirety in general. Only if a tensor satisfies the following
conditions will its entire value be included in the return value of this
method:
1. Has a numeric data type (e.g., float32, int32) and has fewer than 5
elements.
2. Is a string tensor and has fewer than 5 elements. Each string element is
up to 40 bytes.
Args:
device_name: Name of the device that the tensor is on.
node_name: (Original) name of the node that produces the tensor.
maybe_base_expanded_node_name: Possibly base-expanded node name.
output_slot: Output slot number.
debug_op: Name of the debug op.
tensor_value: Value of the tensor, as a numpy.ndarray.
wall_time: Wall timestamp for the tensor.
Returns:
A dict representing the tensor data.
"""
output_slot = int(output_slot)
logger.info(
'Recording tensor value: %s, %d, %s', node_name, output_slot, debug_op)
tensor_values = None
if isinstance(tensor_value, debug_data.InconvertibleTensorProto):
if not tensor_value.initialized:
tensor_dtype = UNINITIALIZED_TAG
tensor_shape = UNINITIALIZED_TAG
else:
tensor_dtype = UNSUPPORTED_TAG
tensor_shape = UNSUPPORTED_TAG
tensor_values = NA_TAG
else:
tensor_dtype = tensor_helper.translate_dtype(tensor_value.dtype)
tensor_shape = tensor_value.shape
# The /comm endpoint should respond with tensor values only if the tensor is
# small enough. Otherwise, the detailed values should be queried through a
# dedicated tensor_data that supports slicing.
if tensor_helper.numel(tensor_shape) < 5:
_, _, tensor_values = tensor_helper.array_view(tensor_value)
if tensor_dtype == 'string' and tensor_value is not None:
tensor_values = tensor_helper.process_buffers_for_display(
tensor_values, limit=STRING_ELEMENT_MAX_LEN)
return {
'type': 'tensor',
'timestamp': wall_time,
'data': {
'device_name': device_name,
'node_name': node_name,
'maybe_base_expanded_node_name': maybe_base_expanded_node_name,
'output_slot': output_slot,
'debug_op': debug_op,
'dtype': tensor_dtype,
'shape': tensor_shape,
'values': tensor_values,
},
} | Create a dict() as the outgoing data in the tensor data comm route.
Note: The tensor data in the comm route does not include the value of the
tensor in its entirety in general. Only if a tensor satisfies the following
conditions will its entire value be included in the return value of this
method:
1. Has a numeric data type (e.g., float32, int32) and has fewer than 5
elements.
2. Is a string tensor and has fewer than 5 elements. Each string element is
up to 40 bytes.
Args:
device_name: Name of the device that the tensor is on.
node_name: (Original) name of the node that produces the tensor.
maybe_base_expanded_node_name: Possibly base-expanded node name.
output_slot: Output slot number.
debug_op: Name of the debug op.
tensor_value: Value of the tensor, as a numpy.ndarray.
wall_time: Wall timestamp for the tensor.
Returns:
A dict representing the tensor data. |
def property_data_zpool():
'''
Return a dict of zpool properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zpool get' with some supplemental
data that is hardcoded. There is no better way to get this information aside
from reading the code.
'''
# NOTE: man page also mentions a few short forms
property_data = _property_parse_cmd(_zpool_cmd(), {
'allocated': 'alloc',
'autoexpand': 'expand',
'autoreplace': 'replace',
'listsnapshots': 'listsnaps',
'fragmentation': 'frag',
})
# NOTE: zpool status/iostat has a few extra fields
zpool_size_extra = [
'capacity-alloc', 'capacity-free',
'operations-read', 'operations-write',
'bandwith-read', 'bandwith-write',
'read', 'write',
]
zpool_numeric_extra = [
'cksum', 'cap',
]
for prop in zpool_size_extra:
property_data[prop] = {
'edit': False,
'type': 'size',
'values': '<size>',
}
for prop in zpool_numeric_extra:
property_data[prop] = {
'edit': False,
'type': 'numeric',
'values': '<count>',
}
return property_data | Return a dict of zpool properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zpool get' with some supplemental
data that is hardcoded. There is no better way to get this information aside
from reading the code. |
def get_language_description(grammar_file):
"""
Gets the language description from given language grammar file.
:param grammar_file: Language grammar.
:type grammar_file: unicode
:return: Language description.
:rtype: Language
"""
LOGGER.debug("> Processing '{0}' grammar file.".format(grammar_file))
sections_file_parser = foundations.parsers.SectionsFileParser(grammar_file)
sections_file_parser.parse(strip_quotation_markers=False)
name = sections_file_parser.get_value("Name", "Language")
if not name:
raise LanguageGrammarError("{0} | '{1}' attribute not found in '{2}' file!".format(__name__,
"Language|Name",
grammar_file))
extensions = sections_file_parser.get_value("Extensions", "Language")
if not extensions:
raise LanguageGrammarError("{0} | '{1}' attribute not found in '{2}' file!".format(__name__,
"Language|Extensions",
grammar_file))
highlighter = get_object_from_language_accelerators(sections_file_parser.get_value("Highlighter", "Accelerators"))
completer = get_object_from_language_accelerators(sections_file_parser.get_value("Completer", "Accelerators"))
pre_input_accelerators = sections_file_parser.get_value("PreInputAccelerators", "Accelerators")
pre_input_accelerators = pre_input_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in pre_input_accelerators.split("|")] or ()
post_input_accelerators = sections_file_parser.get_value("PostInputAccelerators", "Accelerators")
post_input_accelerators = post_input_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in post_input_accelerators.split("|")] or ()
visual_accelerators = sections_file_parser.get_value("VisualAccelerators", "Accelerators")
visual_accelerators = visual_accelerators and [get_object_from_language_accelerators(accelerator)
for accelerator in visual_accelerators.split("|")] or ()
indent_marker = sections_file_parser.section_exists("Syntax") and sections_file_parser.get_value("IndentMarker",
"Syntax") or \
DEFAULT_INDENT_MARKER
comment_marker = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentMarker", "Syntax") or ""
comment_block_marker_start = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentBlockMarkerStart", "Syntax") or ""
comment_block_marker_end = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("CommentBlockMarkerEnd", "Syntax") or ""
symbols_pairs = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("SymbolsPairs", "Syntax") or {}
if symbols_pairs:
associated_pairs = foundations.data_structures.Lookup()
for pair in symbols_pairs.split("|"):
associated_pairs[pair[0]] = pair[1]
symbols_pairs = associated_pairs
indentation_symbols = sections_file_parser.section_exists("Syntax") and \
sections_file_parser.get_value("IndentationSymbols", "Syntax")
indentation_symbols = indentation_symbols and indentation_symbols.split("|") or ()
rules = []
attributes = sections_file_parser.sections.get("Rules")
if attributes:
for attribute in sections_file_parser.sections["Rules"]:
pattern = sections_file_parser.get_value(attribute, "Rules")
rules.append(umbra.ui.highlighters.Rule(name=foundations.namespace.remove_namespace(attribute),
pattern=QRegExp(pattern)))
tokens = []
dictionary = sections_file_parser.get_value("Dictionary", "Accelerators")
if dictionary:
dictionary_file = os.path.join(os.path.dirname(grammar_file), dictionary)
if foundations.common.path_exists(dictionary_file):
with open(dictionary_file, "r") as file:
for line in iter(file):
line = line.strip()
line and tokens.append(line)
else:
LOGGER.warning(
"!> {0} | '{1}' language dictionary file doesn't exists and will be skipped!".format(__name__,
dictionary_file))
theme = get_object_from_language_accelerators(sections_file_parser.get_value("Theme", "Accelerators")) or \
umbra.ui.highlighters.DEFAULT_THEME
attributes = {"name": name,
"file": grammar_file,
"parser": sections_file_parser,
"extensions": extensions,
"highlighter": highlighter,
"completer": completer,
"pre_input_accelerators": pre_input_accelerators,
"post_input_accelerators": post_input_accelerators,
"visual_accelerators": visual_accelerators,
"indent_marker": indent_marker,
"comment_marker": comment_marker,
"comment_block_marker_start": comment_block_marker_start,
"comment_block_marker_end": comment_block_marker_end,
"symbols_pairs": symbols_pairs,
"indentation_symbols": indentation_symbols,
"rules": rules,
"tokens": tokens,
"theme": theme}
for attribute, value in sorted(attributes.iteritems()):
if attribute == "rules":
LOGGER.debug("> Registered '{0}' syntax rules.".format(len(value)))
elif attribute == "tokens":
LOGGER.debug("> Registered '{0}' completion tokens.".format(len(value)))
else:
LOGGER.debug("> Attribute: '{0}', Value: '{1}'.".format(attribute, value))
return Language(**attributes) | Gets the language description from given language grammar file.
:param grammar_file: Language grammar.
:type grammar_file: unicode
:return: Language description.
:rtype: Language |
async def get_updates(self, offset: typing.Union[base.Integer, None] = None,
limit: typing.Union[base.Integer, None] = None,
timeout: typing.Union[base.Integer, None] = None,
allowed_updates:
typing.Union[typing.List[base.String], None] = None) -> typing.List[types.Update]:
"""
Use this method to receive incoming updates using long polling (wiki).
Notes
1. This method will not work if an outgoing webhook is set up.
2. In order to avoid getting duplicate updates, recalculate offset after each server response.
Source: https://core.telegram.org/bots/api#getupdates
:param offset: Identifier of the first update to be returned
:type offset: :obj:`typing.Union[base.Integer, None]`
:param limit: Limits the number of updates to be retrieved
:type limit: :obj:`typing.Union[base.Integer, None]`
:param timeout: Timeout in seconds for long polling
:type timeout: :obj:`typing.Union[base.Integer, None]`
:param allowed_updates: List the types of updates you want your bot to receive
:type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]`
:return: An Array of Update objects is returned
:rtype: :obj:`typing.List[types.Update]`
"""
allowed_updates = prepare_arg(allowed_updates)
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_UPDATES, payload)
return [types.Update(**update) for update in result] | Use this method to receive incoming updates using long polling (wiki).
Notes
1. This method will not work if an outgoing webhook is set up.
2. In order to avoid getting duplicate updates, recalculate offset after each server response.
Source: https://core.telegram.org/bots/api#getupdates
:param offset: Identifier of the first update to be returned
:type offset: :obj:`typing.Union[base.Integer, None]`
:param limit: Limits the number of updates to be retrieved
:type limit: :obj:`typing.Union[base.Integer, None]`
:param timeout: Timeout in seconds for long polling
:type timeout: :obj:`typing.Union[base.Integer, None]`
:param allowed_updates: List the types of updates you want your bot to receive
:type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]`
:return: An Array of Update objects is returned
:rtype: :obj:`typing.List[types.Update]` |
def get_position_i(self):
""" Get the I value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("Could not read from motors") | Get the I value of the current PID for position |
def create(self, email, tos=1, options=None):
""" Creates an account with Zencoder, no API Key necessary.
https://app.zencoder.com/docs/api/accounts/create
"""
data = {'email': email,
'terms_of_service': str(tos)}
if options:
data.update(options)
return self.post(self.base_url, body=json.dumps(data)) | Creates an account with Zencoder, no API Key necessary.
https://app.zencoder.com/docs/api/accounts/create |
def save(self, name, content):
"""
Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return name.replace("\\", "/") | Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning. |
def updatej9DB(dbname = abrevDBname, saveRawHTML = False):
"""Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws beginning with today's date.
"""
if saveRawHTML:
rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
if not os.path.isdir(rawDir):
os.mkdir(rawDir)
_j9SaveCurrent(sDir = rawDir)
dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
try:
with dbm.dumb.open(dbLoc, flag = 'c') as db:
try:
j9Dict = _getCurrentj9Dict()
except urllib.error.URLError:
raise urllib.error.URLError("Unable to access server, check your connection")
for k, v in j9Dict.items():
if k in db:
for jName in v:
if jName not in j9Dict[k]:
j9Dict[k] += '|' + jName
else:
db[k] = '|'.join(v)
except dbm.dumb.error as e:
raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleting everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e)) | Updates the database of Journal Title Abbreviations. Requires an internet connection. The database is saved relative to the source file, not the working directory.
# Parameters
_dbname_ : `optional [str]`
> The name of the database file, default is "j9Abbreviations.db"
_saveRawHTML_ : `optional [bool]`
> Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws beginning with today's date. |
def execute(self):
"""
params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
}
"""
if 'type' not in self.params:
raise MMException("Please include the type of log, 'user' or 'apex'")
if 'debug_categories' not in self.params:
raise MMException("Please include debug categories in dictionary format: e.g.: {'ApexCode':'DEBUG', 'Visualforce':'INFO'}")
request = {}
if self.params['type'] == 'user':
request['ScopeId'] = None
request['TracedEntityId'] = self.params.get('user_id', config.sfdc_client.user_id)
elif self.params['type'] == 'apex':
#request['ScopeId'] = 'user'
request['ScopeId'] = config.sfdc_client.user_id
request['TracedEntityId'] = self.params['apex_id']
for c in self.params['debug_categories']:
if 'category' in c:
request[c['category']] = c['level']
else:
request[c] = self.params['debug_categories'][c]
request['ExpirationDate'] = util.get_iso_8601_timestamp(int(float(self.params.get('expiration', 30))))
config.logger.debug(self.params['debug_categories'])
config.logger.debug("Log creation reuqest--->")
config.logger.debug(request)
create_result = config.sfdc_client.create_trace_flag(request)
config.logger.debug("Log creation response--->")
config.logger.debug(create_result)
if type(create_result) is list:
create_result = create_result[0]
if type(create_result) is not str and type(create_result) is not unicode:
return json.dumps(create_result)
else:
return create_result | params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
} |
def tie_properties(self, class_list):
""" Runs through the classess and ties the properties to the class
args:
class_list: a list of class names to run
"""
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.info(" Tieing properties to the class")
for cls_name in class_list:
cls_obj = getattr(MODULE.rdfclass, cls_name)
prop_dict = dict(cls_obj.properties)
for prop_name, prop_obj in cls_obj.properties.items():
setattr(cls_obj, prop_name, link_property(prop_obj, cls_obj))
log.info(" Finished tieing properties in: %s",
(datetime.datetime.now() - start)) | Runs through the classess and ties the properties to the class
args:
class_list: a list of class names to run |
def change_email(self, email):
"""
Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``temporary_email`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets send out by ``send_verification_email``.
:param email:
The new email address that the user wants to use.
"""
self.email_unconfirmed = email
salt, hash = generate_sha1(self.username)
self.email_confirmation_key = hash
self.email_confirmation_key_created = get_datetime_now()
self.save()
# Send email for activation
self.send_confirmation_email()
return self | Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field
-- ``temporary_email`` -- we are able to set this email address
after the user has verified it by clicking on the verification URI
in the email. This email gets send out by ``send_verification_email``.
:param email:
The new email address that the user wants to use. |
def readBIM(fileName):
"""Reads a BIM file.
:param fileName: the name of the BIM file to read.
:type fileName: str
:returns: the set of markers in the BIM file.
Reads a Plink BIM file and extracts the names of the markers. There is one
marker per line, and the name of the marker is in the second column. There
is no header in the BIM file.
"""
# Reading the first BIM file
snps = set()
with open(fileName, "r") as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split("\t")
snpName = row[1]
snps.add(snpName)
return snps | Reads a BIM file.
:param fileName: the name of the BIM file to read.
:type fileName: str
:returns: the set of markers in the BIM file.
Reads a Plink BIM file and extracts the names of the markers. There is one
marker per line, and the name of the marker is in the second column. There
is no header in the BIM file. |
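A quick self-contained check that writes a tiny hypothetical two-marker BIM file first (columns: chromosome, name, genetic distance, position, allele 1, allele 2):
with open("tiny.bim", "w") as handle:
    handle.write("1\trs123\t0\t1000\tA\tG\n")
    handle.write("1\trs456\t0\t2000\tC\tT\n")
print(readBIM("tiny.bim"))   # {'rs123', 'rs456'} (set order may vary)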
def get_dataset(self, key, info):
"""Get calibrated channel data."""
if self.mdrs is None:
self._read_all(self.filename)
if key.name in ['longitude', 'latitude']:
lons, lats = self.get_full_lonlats()
if key.name == 'longitude':
dataset = create_xarray(lons)
else:
dataset = create_xarray(lats)
elif key.name in ['solar_zenith_angle', 'solar_azimuth_angle',
'satellite_zenith_angle', 'satellite_azimuth_angle']:
sun_azi, sun_zen, sat_azi, sat_zen = self.get_full_angles()
if key.name == 'solar_zenith_angle':
dataset = create_xarray(sun_zen)
elif key.name == 'solar_azimuth_angle':
dataset = create_xarray(sun_azi)
if key.name == 'satellite_zenith_angle':
dataset = create_xarray(sat_zen)
elif key.name == 'satellite_azimuth_angle':
dataset = create_xarray(sat_azi)
else:
mask = None
if key.calibration == 'counts':
raise ValueError('calibration=counts is not supported! ' +
'This reader cannot return counts')
elif key.calibration not in ['reflectance', 'brightness_temperature', 'radiance']:
raise ValueError('calibration type ' + str(key.calibration) +
' is not supported!')
if key.name in ['3A', '3a'] and self.three_a_mask is None:
self.three_a_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 2 ** 16)
if key.name in ['3B', '3b'] and self.three_b_mask is None:
self.three_b_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 0)
if key.name not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]:
LOG.info("Can't load channel in eps_l1b: " + str(key.name))
return
if key.name == "1":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 0, :],
self["CH1_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 0, :]
if key.name == "2":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 1, :],
self["CH2_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 1, :]
if key.name.lower() == "3a":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 2, :],
self["CH3A_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 2, :]
mask = np.empty(array.shape, dtype=bool)
mask[:, :] = self.three_a_mask[:, np.newaxis]
if key.name.lower() == "3b":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 2, :],
self["CH3B_CENTRAL_WAVENUMBER"],
self["CH3B_CONSTANT1"],
self["CH3B_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 2, :]
mask = np.empty(array.shape, dtype=bool)
mask[:, :] = self.three_b_mask[:, np.newaxis]
if key.name == "4":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 3, :],
self["CH4_CENTRAL_WAVENUMBER"],
self["CH4_CONSTANT1"],
self["CH4_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 3, :]
if key.name == "5":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 4, :],
self["CH5_CENTRAL_WAVENUMBER"],
self["CH5_CONSTANT1"],
self["CH5_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 4, :]
dataset = create_xarray(array)
if mask is not None:
dataset = dataset.where(~mask)
dataset.attrs['platform_name'] = self.platform_name
dataset.attrs['sensor'] = self.sensor_name
dataset.attrs.update(info)
dataset.attrs.update(key.to_dict())
return dataset | Get calibrated channel data. |
def put(self, namespacePrefix):
"""Update a specific configuration namespace"""
self.reqparse.add_argument('name', type=str, required=True)
self.reqparse.add_argument('sortOrder', type=int, required=True)
args = self.reqparse.parse_args()
ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
if not ns:
return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND)
ns.name = args['name']
ns.sort_order = args['sortOrder']
db.session.add(ns)
db.session.commit()
self.dbconfig.reload_data()
auditlog(event='configNamespace.update', actor=session['user'].username, data=args)
return self.make_response('Namespace updated') | Update a specific configuration namespace |
def AddExtraShapes(extra_shapes_txt, graph):
"""
Add extra shapes into our input set by parsing them out of a GTFS-formatted
shapes.txt file. Useful for manually adding lines to a shape file, since it's
a pain to edit .shp files.
"""
print("Adding extra shapes from %s" % extra_shapes_txt)
try:
tmpdir = tempfile.mkdtemp()
shutil.copy(extra_shapes_txt, os.path.join(tmpdir, 'shapes.txt'))
loader = transitfeed.ShapeLoader(tmpdir)
schedule = loader.Load()
for shape in schedule.GetShapeList():
print("Adding extra shape: %s" % shape.shape_id)
graph.AddPoly(ShapeToPoly(shape))
finally:
if tmpdir:
shutil.rmtree(tmpdir) | Add extra shapes into our input set by parsing them out of a GTFS-formatted
shapes.txt file. Useful for manually adding lines to a shape file, since it's
a pain to edit .shp files. |
def multicat(data, samples, ipyclient):
"""
Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
This is messy, could use simplifying.
"""
## progress ticker
start = time.time()
printstr = " indexing clusters | {} | s6 |"
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
## parallel client
lbview = ipyclient.load_balanced_view()
## First submit a sleeper job as temp_flag for cleanups
last_sample = 0
cleanups = {}
cleanups[last_sample] = lbview.apply(time.sleep, 0.0)
## get samples and names, sorted
snames = [i.name for i in samples]
snames.sort()
## Build an array for quickly indexing consens reads from catg files.
## save as a npy int binary file.
uhandle = os.path.join(data.dirs.across, data.name+".utemp.sort")
bseeds = os.path.join(data.dirs.across, data.name+".tmparrs.h5")
## send as first async1 job
async1 = lbview.apply(get_seeds_and_hits, *(uhandle, bseeds, snames))
async2 = lbview.apply(fill_dups_arr, data)
## progress bar for seed/hit sorting
while not (async1.ready() and async2.ready()):
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(20, 0, printstr.format(elapsed))
time.sleep(0.1)
if not async1.successful():
raise IPyradWarningExit("error in get_seeds: %s", async1.exception())
if not async2.successful():
raise IPyradWarningExit("error in fill_dups: %s", async2.exception())
## make a limited njobs view based on mem limits
## is using smallview necessary? (yes, it is for bad libraries)
smallview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
## make sure there are no old tmp.h5 files
smpios = [os.path.join(data.dirs.across, sample.name+'.tmp.h5') \
for sample in samples]
for smpio in smpios:
if os.path.exists(smpio):
os.remove(smpio)
## send 'singlecat()' jobs to engines
jobs = {}
for sample in samples:
sidx = snames.index(sample.name)
jobs[sample.name] = smallview.apply(singlecat, *(data, sample, bseeds, sidx))
## check for finished and submit disk-writing job when finished
alljobs = len(jobs)
while 1:
## check for finished jobs
curkeys = jobs.keys()
for key in curkeys:
async = jobs[key]
if async.ready():
if async.successful():
## submit cleanup for finished job
args = (data, data.samples[key], snames.index(key))
with lbview.temp_flags(after=cleanups[last_sample]):
cleanups[key] = lbview.apply(write_to_fullarr, *args)
last_sample = key
del jobs[key]
else:
err = jobs[key].exception()
errmsg = "singlecat error: {} {}".format(key, err)
raise IPyradWarningExit(errmsg)
## print progress or break
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(alljobs, alljobs-len(jobs), printstr.format(elapsed))
time.sleep(0.1)
if not jobs:
break
## add the dask_chroms func for reference data
if 'reference' in data.paramsdict["assembly_method"]:
with lbview.temp_flags(after=cleanups.values()):
cleanups['ref'] = lbview.apply(dask_chroms, *(data, samples))
## wait for "write_to_fullarr" jobs to finish
print("")
start = time.time()
printstr = " building database | {} | s6 |"
while 1:
finished = [i for i in cleanups.values() if i.ready()]
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(len(cleanups), len(finished), printstr.format(elapsed))
time.sleep(0.1)
## break if one failed, or if finished
if not all([i.successful() for i in finished]):
break
if len(cleanups) == len(finished):
break
## check for errors
for job in cleanups:
if cleanups[job].ready():
if not cleanups[job].successful():
err = " error in write_to_fullarr ({}) {}"\
.format(job, cleanups[job].result())
LOGGER.error(err)
raise IPyradWarningExit(err)
## remove large indels array file and singlecat tmparr file
ifile = os.path.join(data.dirs.across, data.name+".tmp.indels.hdf5")
if os.path.exists(ifile):
os.remove(ifile)
if os.path.exists(bseeds):
os.remove(bseeds)
for sh5 in [os.path.join(data.dirs.across, i.name+".tmp.h5") for i in samples]:
os.remove(sh5)
## print final progress
elapsed = datetime.timedelta(seconds=int(time.time() - start))
progressbar(10, 10, printstr.format(elapsed))
print("") | Runs singlecat and cleanup jobs for each sample.
For each sample this fills its own hdf5 array with catg data & indels.
This is messy, could use simplifying. |
def error_messages(self, driver_id=None):
"""Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver.
"""
if driver_id is not None:
assert isinstance(driver_id, ray.DriverID)
return self._error_messages(driver_id)
error_table_keys = self.redis_client.keys(
ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*")
driver_ids = [
key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]
for key in error_table_keys
]
return {
binary_to_hex(driver_id): self._error_messages(
ray.DriverID(driver_id))
for driver_id in driver_ids
} | Get the error messages for all drivers or a specific driver.
Args:
driver_id: The specific driver to get the errors for. If this is
None, then this method retrieves the errors for all drivers.
Returns:
A dictionary mapping driver ID to a list of the error messages for
that driver. |
def check_pdb_status(pdbid):
"""Returns the status and up-to-date entry in the PDB for a given PDB ID"""
url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
xmlf = urlopen(url)
xml = et.parse(xmlf)
xmlf.close()
status = None
current_pdbid = pdbid
for df in xml.xpath('//record'):
status = df.attrib['status'] # Status of an entry can be either 'UNKNOWN', 'OBSOLETE', or 'CURRENT'
if status == 'OBSOLETE':
current_pdbid = df.attrib['replacedBy'] # Contains the up-to-date PDB ID for obsolete entries
return [status, current_pdbid.lower()] | Returns the status and up-to-date entry in the PDB for a given PDB ID |
def run(self, node, client):
"""
Upload the file, retaining permissions
See also L{Deployment.run}
"""
perms = os.stat(self.source).st_mode
client.put(path=self.target, chmod=perms,
contents=open(self.source, 'rb').read())
return node | Upload the file, retaining permissions
See also L{Deployment.run} |
def strptime(cls, value, format):
""" Parse a datetime string using the provided format.
This also emulates `%z` support on Python 2.
:param value: Datetime string
:type value: str
:param format: Format to use for parsing
:type format: str
:rtype: datetime
:raises ValueError: Invalid format
:raises TypeError: Invalid input type
"""
# Simplest case: direct parsing
if cls.python_supports_z or '%z' not in format:
return datetime.strptime(value, format)
else:
# %z emulation case
assert format[-2:] == '%z', 'For performance, %z is only supported at the end of the string'
# Parse
dt = datetime.strptime(value[:-5], format[:-2]) # cutoff '%z' and '+0000'
tz = FixedOffset(value[-5:]) # parse %z into tzinfo
# Localize
return dt.replace(tzinfo=tz) | Parse a datetime string using the provided format.
This also emulates `%z` support on Python 2.
:param value: Datetime string
:type value: str
:param format: Format to use for parsing
:type format: str
:rtype: datetime
:raises ValueError: Invalid format
:raises TypeError: Invalid input type |
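A minimal stdlib-only sketch of the same %z-at-the-end emulation, with datetime.timezone standing in for the FixedOffset helper assumed above:
from datetime import datetime, timedelta, timezone

value, fmt = "2019-03-01 12:30:00+0530", "%Y-%m-%d %H:%M:%S%z"
naive = datetime.strptime(value[:-5], fmt[:-2])        # cut off '+0530' and '%z'
sign = 1 if value[-5] == "+" else -1
offset = sign * timedelta(hours=int(value[-4:-2]), minutes=int(value[-2:]))
aware = naive.replace(tzinfo=timezone(offset))
print(aware.isoformat())                               # 2019-03-01T12:30:00+05:30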
def _opbend_transform_mean(rs, fn_low, deriv=0):
"""Compute the mean of the 3 opbends
"""
v = 0.0
d = np.zeros((4,3), float)
dd = np.zeros((4,3,4,3), float)
#loop over the 3 cyclic permutations
for p in np.array([[0,1,2], [2,0,1], [1,2,0]]):
opbend = _opbend_transform([rs[p[0]], rs[p[1]], rs[p[2]], rs[3]], fn_low, deriv)
v += opbend[0]/3
index0 = np.where(p==0)[0][0] #index0 is the index of the 0th atom (rs[0])
index1 = np.where(p==1)[0][0]
index2 = np.where(p==2)[0][0]
index3 = 3
if deriv>0:
d[0] += opbend[1][index0]/3
d[1] += opbend[1][index1]/3
d[2] += opbend[1][index2]/3
d[3] += opbend[1][index3]/3
if deriv>1:
dd[0, :, 0, :] += opbend[2][index0, :, index0, :]/3
dd[0, :, 1, :] += opbend[2][index0, :, index1, :]/3
dd[0, :, 2, :] += opbend[2][index0, :, index2, :]/3
dd[0, :, 3, :] += opbend[2][index0, :, index3, :]/3
dd[1, :, 0, :] += opbend[2][index1, :, index0, :]/3
dd[1, :, 1, :] += opbend[2][index1, :, index1, :]/3
dd[1, :, 2, :] += opbend[2][index1, :, index2, :]/3
dd[1, :, 3, :] += opbend[2][index1, :, index3, :]/3
dd[2, :, 0, :] += opbend[2][index2, :, index0, :]/3
dd[2, :, 1, :] += opbend[2][index2, :, index1, :]/3
dd[2, :, 2, :] += opbend[2][index2, :, index2, :]/3
dd[2, :, 3, :] += opbend[2][index2, :, index3, :]/3
dd[3, :, 0, :] += opbend[2][index3, :, index0, :]/3
dd[3, :, 1, :] += opbend[2][index3, :, index1, :]/3
dd[3, :, 2, :] += opbend[2][index3, :, index2, :]/3
dd[3, :, 3, :] += opbend[2][index3, :, index3, :]/3
if deriv==0:
return v,
elif deriv==1:
return v, d
elif deriv==2:
return v, d, dd
else:
raise ValueError("deriv must be 0, 1 or 2.") | Compute the mean of the 3 opbends |
def build_next_url(self, url):
"""Builds next url in a format compatible with cousteau. Path + query"""
if not url:
if self.split_urls: # If we had a long request give the next part
self.total_count_flag = False # Reset flag for count
return self.split_urls.pop(0)
else:
return None
parsed_url = urlparse(url)
return "{0}?{1}".format(parsed_url.path, parsed_url.query) | Builds next url in a format compatible with cousteau. Path + query |
def variant(case_id, variant_id):
"""Show a single variant."""
case_obj = app.db.case(case_id)
variant = app.db.variant(case_id, variant_id)
if variant is None:
return abort(404, "variant not found")
comments = app.db.comments(variant_id=variant.md5)
template = 'sv_variant.html' if app.db.variant_type == 'sv' else 'variant.html'
return render_template(template, variant=variant, case_id=case_id,
comments=comments, case=case_obj) | Show a single variant. |
def is_valid_path(path):
"""
:return: True if the path is valid, else raise a ValueError with the
specific error
"""
if not path.startswith('/'):
msg = 'Invalid path "%s". Paths need to start with "/".'
raise ValueError(msg % path[:40])
for c in ' \t':
if c in path:
msg = ('Invalid character "%s" found in path. Paths need to be'
' URL-encoded.')
raise ValueError(msg % c)
return True | :return: True if the path is valid, else raise a ValueError with the
specific error |
def docker(ctx, docker_run_args, docker_image, nvidia, digest, jupyter, dir, no_dir, shell, port, cmd, no_tty):
"""W&B docker lets you run your code in a docker image ensuring wandb is configured. It adds the WANDB_DOCKER and WANDB_API_KEY
environment variables to your container and mounts the current directory in /app by default. You can pass additional
args which will be added to `docker run` before the image name is declared, we'll choose a default image for you if
one isn't passed:
wandb docker -v /mnt/dataset:/app/data
wandb docker gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.4.0 --jupyter
wandb docker wandb/deepo:keras-gpu --no-tty --cmd "python train.py --epochs=5"
By default we override the entrypoint to check for the existence of wandb and install it if not present. If you pass the --jupyter
flag we will ensure jupyter is installed and start jupyter lab on port 8888. If we detect nvidia-docker on your system we will use
the nvidia runtime. If you just want wandb to set environment variables for an existing docker run command, see the wandb docker-run
command.
"""
if not find_executable('docker'):
raise ClickException(
"Docker not installed, install it from https://docker.com" )
args = list(docker_run_args)
image = docker_image or ""
# remove run for users used to nvidia-docker
if len(args) > 0 and args[0] == "run":
args.pop(0)
if image == "" and len(args) > 0:
image = args.pop(0)
# If the user adds docker args without specifying an image (should be rare)
if not util.docker_image_regex(image.split("@")[0]):
if image:
args = args + [image]
image = wandb.docker.default_image(gpu=nvidia)
subprocess.call(["docker", "pull", image])
_, repo_name, tag = wandb.docker.parse(image)
resolved_image = wandb.docker.image_id(image)
if resolved_image is None:
raise ClickException(
"Couldn't find image locally or in a registry, try running `docker pull %s`" % image)
if digest:
sys.stdout.write(resolved_image)
exit(0)
existing = wandb.docker.shell(
["ps", "-f", "ancestor=%s" % resolved_image, "-q"])
if existing:
question = {
'type': 'confirm',
'name': 'attach',
'message': "Found running container with the same image, do you want to attach?",
}
result = whaaaaat.prompt([question])
if result and result['attach']:
subprocess.call(['docker', 'attach', existing.split("\n")[0]])
exit(0)
cwd = os.getcwd()
command = ['docker', 'run', '-e', 'LANG=C.UTF-8', '-e', 'WANDB_DOCKER=%s' % resolved_image, '--ipc=host',
'-v', wandb.docker.entrypoint+':/wandb-entrypoint.sh', '--entrypoint', '/wandb-entrypoint.sh']
if nvidia:
command.extend(['--runtime', 'nvidia'])
if not no_dir:
#TODO: We should default to the working directory if defined
command.extend(['-v', cwd+":"+dir, '-w', dir])
if api.api_key:
command.extend(['-e', 'WANDB_API_KEY=%s' % api.api_key])
else:
wandb.termlog("Couldn't find WANDB_API_KEY, run `wandb login` to enable streaming metrics")
if jupyter:
command.extend(['-e', 'WANDB_ENSURE_JUPYTER=1', '-p', port+':8888'])
no_tty = True
cmd = "jupyter lab --no-browser --ip=0.0.0.0 --allow-root --NotebookApp.token= --notebook-dir %s" % dir
command.extend(args)
if no_tty:
command.extend([image, shell, "-c", cmd])
else:
if cmd:
command.extend(['-e', 'WANDB_COMMAND=%s' % cmd])
command.extend(['-it', image, shell])
wandb.termlog("Launching docker container \U0001F6A2")
subprocess.call(command) | W&B docker lets you run your code in a docker image ensuring wandb is configured. It adds the WANDB_DOCKER and WANDB_API_KEY
environment variables to your container and mounts the current directory in /app by default. You can pass additional
args which will be added to `docker run` before the image name is declared, we'll choose a default image for you if
one isn't passed:
wandb docker -v /mnt/dataset:/app/data
wandb docker gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.4.0 --jupyter
wandb docker wandb/deepo:keras-gpu --no-tty --cmd "python train.py --epochs=5"
By default we override the entrypoint to check for the existence of wandb and install it if not present. If you pass the --jupyter
flag we will ensure jupyter is installed and start jupyter lab on port 8888. If we detect nvidia-docker on your system we will use
the nvidia runtime. If you just want wandb to set environment variables for an existing docker run command, see the wandb docker-run
command. |
def _LogProgressUpdateIfReasonable(self):
"""Prints a progress update if enough time has passed."""
next_log_time = (
self._time_of_last_status_log +
self.SECONDS_BETWEEN_STATUS_LOG_MESSAGES)
current_time = time.time()
if current_time < next_log_time:
return
completion_time = time.ctime(current_time + self.EstimateTimeRemaining())
log_message = (
'{0:s} hash analysis plugin running. {1:d} hashes in queue, '
'estimated completion time {2:s}.'.format(
self.NAME, self.hash_queue.qsize(), completion_time))
logger.info(log_message)
self._time_of_last_status_log = current_time | Prints a progress update if enough time has passed. |
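The time-throttling pattern used above, in isolation, as a hedged sketch with shortened names and an illustrative interval:
import time

class ThrottledLogger(object):
    SECONDS_BETWEEN_MESSAGES = 30

    def __init__(self):
        self._time_of_last_log = 0.0

    def maybe_log(self, message):
        # Emit at most one message per interval; otherwise return silently.
        if time.time() < self._time_of_last_log + self.SECONDS_BETWEEN_MESSAGES:
            return
        print(message)
        self._time_of_last_log = time.time()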
def dependencies(request, ident, stateless=False, **kwargs):
'Return the dependencies'
_, app = DashApp.locate_item(ident, stateless)
with app.app_context():
view_func = app.locate_endpoint_function('dash-dependencies')
resp = view_func()
return HttpResponse(resp.data,
content_type=resp.mimetype) | Return the dependencies |
def save_imglist(self, fname=None, root=None, shuffle=False):
"""
save imglist to disk
Parameters:
----------
fname : str
saved filename
"""
def progress_bar(count, total, suffix=''):
import sys
bar_len = 24
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
str_list = []
for index in range(self.num_images):
progress_bar(index, self.num_images)
label = self.label_from_index(index)
if label.size < 1:
continue
path = self.image_path_from_index(index)
if root:
path = osp.relpath(path, root)
str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \
+ ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n')
if str_list:
if shuffle:
import random
random.shuffle(str_list)
if not fname:
fname = self.name + '.lst'
with open(fname, 'w') as f:
for line in str_list:
f.write(line)
else:
raise RuntimeError("No image in imdb") | save imglist to disk
Parameters:
----------
fname : str
saved filename |
def name(self):
"""
Get the module name
:return: Module name
:rtype: str | unicode
"""
res = type(self).__name__
if self._id:
res += ".{}".format(self._id)
return res | Get the module name
:return: Module name
:rtype: str | unicode |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'last_updated') and self.last_updated is not None:
_dict['last_updated'] = datetime_to_string(self.last_updated)
return _dict | Return a json dictionary representing this model. |
def del_option(self, section, option):
""" Deletes an option if the section and option exist """
if self.config.has_section(section):
if self.config.has_option(section, option):
self.config.remove_option(section, option)
return (True, self.config.options(section))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist') | Deletes an option if the section and option exist |
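A self-contained sketch of the same guard order against a plain configparser object (the section and option names here are invented for illustration):
import configparser

config = configparser.ConfigParser()
config.read_string("[server]\nhost = localhost\nport = 8080\n")

def remove_option_safely(config, section, option):
    # Check the section first, then the option, mirroring del_option above.
    if config.has_section(section):
        if config.has_option(section, option):
            config.remove_option(section, option)
            return (True, config.options(section))
        return (False, 'Option: ' + option + ' does not exist')
    return (False, 'Section: ' + section + ' does not exist')

print(remove_option_safely(config, 'server', 'port'))  # (True, ['host'])
print(remove_option_safely(config, 'server', 'port'))  # (False, 'Option: port does not exist')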
def load_method(path,method,class_name = None,instance_creator = None):
'''
Returns the method or function specified.
Args :
path : The path to the module containing the method or function.
method : The name of the function.
class_name : The name of the class if the function is a method.
instance_creator: The name of the method to return the class instance.
'''
#Load the module
module = load_module(path)
if class_name :
#If a class, Create an instance
class_type = getattr(module, class_name)
if instance_creator:
ic_rest = instance_creator
nxt = module
while ('.' in ic_rest) :
nxt = getattr(nxt , instance_creator.split('.')[0])
ic_rest = '.'.join(ic_rest.split('.')[1:])
instance = getattr(module, instance_creator)()
else :
instance = class_type()
return getattr(instance , method)
else :
return getattr(module , method) | Returns an instance of the method specified.
Args :
path : The path to the module containing the method or function.
method : The name of the function.
class_name : The name of the class if the function is a method.
instance_creator: The name of the method to return the class instance. |
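A hedged equivalent built on importlib; it assumes a dotted module name rather than the file path that the original load_module helper (not shown in this excerpt) accepts, and it omits the instance_creator handling:
import importlib

def load_method_by_name(module_name, method, class_name=None):
    # Import the module, optionally instantiate a class, then return the callable.
    module = importlib.import_module(module_name)
    if class_name:
        instance = getattr(module, class_name)()
        return getattr(instance, method)
    return getattr(module, method)

dumps = load_method_by_name('json', 'dumps')
print(dumps({'a': 1}))  # {"a": 1}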
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback() | Rollback the changes previously made by remove(). |
def filter_by_moys(self, moys):
"""Filter the Data Collection based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60]
Return:
A new Data Collection with filtered data
"""
_filt_values, _filt_datetimes = self._filter_by_moys_slow(moys)
collection = HourlyDiscontinuousCollection(
self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection | Filter the Data Collection based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60]
Return:
A new Data Collection with filtered data |
def _build_vocab(filename, vocab_dir, vocab_name):
"""Reads a file to build a vocabulary.
Args:
filename: file to read list of words from.
vocab_dir: directory where to save the vocabulary.
vocab_name: vocab file name.
Returns:
text encoder.
"""
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(filename, "r") as f:
data = f.read().split()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
encoder.store_to_file(vocab_path)
else:
encoder = text_encoder.TokenTextEncoder(vocab_path)
return encoder | Reads a file to build a vocabulary.
Args:
filename: file to read list of words from.
vocab_dir: directory where to save the vocabulary.
vocab_name: vocab file name.
Returns:
text encoder. |
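The ordering rule (descending count, then alphabetical) is the subtle part; a small plain-Python illustration without the TensorFlow or text_encoder dependencies:
import collections

data = "the cat sat on the mat the cat".split()
counter = collections.Counter(data)
# Most frequent first, ties broken alphabetically - same sort key as above.
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
print(words)  # ('the', 'cat', 'mat', 'on', 'sat')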
def load(self, *objs, consistent=False):
"""Populate objects from DynamoDB.
:param objs: objects to delete.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
"""
get_table_name = self._compute_table_name
objs = set(objs)
validate_not_abstract(*objs)
table_index, object_index, request = {}, {}, {}
for obj in objs:
table_name = get_table_name(obj.__class__)
key = dump_key(self, obj)
index = index_for(key)
if table_name not in object_index:
table_index[table_name] = list(sorted(key.keys()))
object_index[table_name] = {}
request[table_name] = {"Keys": [], "ConsistentRead": consistent}
if index not in object_index[table_name]:
request[table_name]["Keys"].append(key)
object_index[table_name][index] = set()
object_index[table_name][index].add(obj)
response = self.session.load_items(request)
for table_name, list_of_attrs in response.items():
for attrs in list_of_attrs:
key_shape = table_index[table_name]
key = extract_key(key_shape, attrs)
index = index_for(key)
for obj in object_index[table_name].pop(index):
unpack_from_dynamodb(
attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
object_loaded.send(self, engine=self, obj=obj)
if not object_index[table_name]:
object_index.pop(table_name)
if object_index:
not_loaded = set()
for index in object_index.values():
for index_set in index.values():
not_loaded.update(index_set)
logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
raise MissingObjects("Failed to load some objects.", objects=not_loaded)
logger.info("successfully loaded {} objects".format(len(objs))) | Populate objects from DynamoDB.
:param objs: objects to load.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html |
def on_copy_local(self, pair):
"""Called when the local resource should be copied to remote."""
status = pair.remote_classification
self._log_action("copy", status, ">", pair.local) | Called when the local resource should be copied to remote. |
def _TypecheckDecorator(subject=None, **kwargs):
"""Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator.
"""
if subject is None:
return _TypecheckDecoratorFactory(kwargs)
elif inspect.isfunction(subject) or inspect.ismethod(subject):
return _TypecheckFunction(subject, {}, 2, None)
else:
raise TypeError() | Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator. |
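The dispatch shape (a decorator usable both bare and with keyword arguments only) can be sketched generically; the tracing body below is illustrative and not the original type-checking machinery:
import functools
import inspect

def traced(subject=None, **options):
    # Keyword-only call -> return a decorator; direct call on a function -> wrap it.
    if subject is None:
        return lambda func: traced(func, **options)
    if inspect.isfunction(subject) or inspect.ismethod(subject):
        @functools.wraps(subject)
        def wrapper(*args, **kwargs):
            print("calling", subject.__name__, options)
            return subject(*args, **kwargs)
        return wrapper
    raise TypeError()

@traced
def double(x):
    return 2 * x

@traced(label="squared")
def square(x):
    return x * x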
def polygonVertices(x, y, radius, sides, rotationDegrees=0, stretchHorizontal=1.0, stretchVertical=1.0):
"""
Returns a generator that produces the (x, y) points of the vertices of a regular polygon.
`x` and `y` mark the center of the polygon, `radius` indicates the size,
`sides` specifies what kind of polygon it is.
Odd-sided polygons have a pointed corner at the top and flat horizontal
side at the bottom. The `rotationDegrees` argument will rotate the polygon
counterclockwise.
The polygon can be stretched by passing `stretchHorizontal` or `stretchVertical`
arguments. Passing `2.0` for `stretchHorizontal`, for example, will double the
width of the polygon.
If `filled` is set to `True`, the generator will also produce the interior
(x, y) points.
(Note: The `thickness` parameter is not yet implemented.)
>>> list(polygonVertices(10, 10, 8, 5))
[(10, 2.0), (3, 8.0), (6, 16.0), (14, 16.0), (17, 8.0)]
>>> drawPoints(polygonVertices(10, 10, 8, 5))
,,,,,,,O,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
O,,,,,,,,,,,,,O
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,O,,,,,,,O,,,
>>> drawPoints(polygonVertices(10, 10, 8, 5, rotationDegrees=20))
,,,,,O,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
O,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,O,,,,,,,,
"""
# TODO - validate x, y, radius, sides
# Setting the start point like this guarantees a flat side will be on the "bottom" of the polygon.
if sides % 2 == 1:
angleOfStartPointDegrees = 90 + rotationDegrees
else:
angleOfStartPointDegrees = 90 + rotationDegrees - (180 / sides)
for sideNum in range(sides):
angleOfPointRadians = math.radians(angleOfStartPointDegrees + (360 / sides * sideNum))
yield ( int(math.cos(angleOfPointRadians) * radius * stretchHorizontal) + x,
-(int(math.sin(angleOfPointRadians) * radius) * stretchVertical) + y) | Returns a generator that produces the (x, y) points of the vertices of a regular polygon.
`x` and `y` mark the center of the polygon, `radius` indicates the size,
`sides` specifies what kind of polygon it is.
Odd-sided polygons have a pointed corner at the top and flat horizontal
side at the bottom. The `rotationDegrees` argument will rotate the polygon
counterclockwise.
The polygon can be stretched by passing `stretchHorizontal` or `stretchVertical`
arguments. Passing `2.0` for `stretchHorizontal`, for example, will double the
width of the polygon.
If `filled` is set to `True`, the generator will also produce the interior
(x, y) points.
(Note: The `thickness` parameter is not yet implemented.)
>>> list(polygonVertices(10, 10, 8, 5))
[(10, 2.0), (3, 8.0), (6, 16.0), (14, 16.0), (17, 8.0)]
>>> drawPoints(polygonVertices(10, 10, 8, 5))
,,,,,,,O,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
O,,,,,,,,,,,,,O
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,O,,,,,,,O,,,
>>> drawPoints(polygonVertices(10, 10, 8, 5, rotationDegrees=20))
,,,,,O,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
O,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,,,,,,,,,O
,,,,,,,,,,,,,,
,,,,,,,,,,,,,,
,,,,,O,,,,,,,, |
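The start-angle rule in isolation: odd-sided polygons begin at 90 degrees (a point at the top), even-sided ones are shifted by half a side so a flat edge sits at the bottom. A small sketch of just the vertex angles, under that assumption:
def vertex_angles(sides, rotationDegrees=0):
    # Degrees, counterclockwise from the positive x axis, one per vertex.
    if sides % 2 == 1:
        start = 90 + rotationDegrees
    else:
        start = 90 + rotationDegrees - (180 / sides)
    return [start + (360 / sides) * i for i in range(sides)]

print(vertex_angles(4))  # [45.0, 135.0, 225.0, 315.0] - an axis-aligned square
print(vertex_angles(3))  # [90.0, 210.0, 330.0] - point up, flat bottom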
def transitivity_wu(W):
'''
Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
T : float
transitivity scalar
'''
K = np.sum(np.logical_not(W == 0), axis=1)
ws = cuberoot(W)
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0) | Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
T : float
transitivity scalar |
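A quick sanity check on a binary triangle (all weights 1), where the ratio of triangles to triplets must be exactly 1; cuberoot is inlined here as a plain element-wise cube root:
import numpy as np

W = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
K = np.sum(np.logical_not(W == 0), axis=1)   # node degrees: [2, 2, 2]
ws = W ** (1.0 / 3.0)                        # element-wise cube root
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))   # weighted 3-cycles per node
T = np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0)
print(T)  # 1.0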
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper | A decorator raising a TypeError if we are not a valid remote, based on the path |
def build_parser():
"""
_build_parser_
Set up CLI parser options, parse the
CLI options and return the parsed results
"""
parser = argparse.ArgumentParser(
description='dockerstache templating util'
)
parser.add_argument(
'--output', '-o',
help='Working directory to render dockerfile and templates',
dest='output',
default=None
)
parser.add_argument(
'--input', '-i',
help='Working directory containing dockerfile and script mustache templates',
dest='input',
default=os.getcwd()
)
parser.add_argument(
'--context', '-c',
help='JSON file containing context dictionary to render templates',
dest='context',
default=None
)
parser.add_argument(
'--defaults', '-d',
help='JSON file containing default context dictionary to render templates',
dest='defaults',
default=None
)
parser.add_argument(
'--inclusive',
help='include non .mustache files from template',
default=False,
action='store_true'
)
parser.add_argument(
'--exclude', '-e',
help='exclude files from template in this list',
default=[],
nargs='+'
)
opts = parser.parse_args()
return vars(opts) | _build_parser_
Set up CLI parser options, parse the
CLI options and return the parsed results
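A hedged standalone usage sketch of the same argparse pattern, passing an explicit argument list instead of reading sys.argv (the paths are invented):
import argparse

parser = argparse.ArgumentParser(description='dockerstache templating util')
parser.add_argument('--input', '-i', dest='input', default='.')
parser.add_argument('--output', '-o', dest='output', default=None)
parser.add_argument('--inclusive', default=False, action='store_true')
opts = vars(parser.parse_args(['-i', './templates', '-o', './build', '--inclusive']))
print(opts['input'], opts['output'], opts['inclusive'])  # ./templates ./build True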
def local_replace(self, dt, use_dst=True, _recurse=False, **kwds):
"""Return pywws timestamp (utc, no tzinfo) for the most recent
local time before the pywws timestamp dt, with datetime replace
applied.
"""
local_time = dt + self.standard_offset
if use_dst:
dst_offset = self.dst(local_time)
if dst_offset:
local_time += dst_offset
adjusted_time = local_time.replace(**kwds)
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
adjusted_time -= dst_offset
if self.dst(adjusted_time):
return adjusted_time - self.standard_offset
adjusted_time = local_time.replace(**kwds)
if use_dst:
dst_offset = self.dst(adjusted_time)
adjusted_time -= dst_offset
if adjusted_time > local_time and not _recurse:
return self.local_replace(
dt - DAY, use_dst=use_dst, _recurse=True, **kwds)
return adjusted_time - self.standard_offset | Return pywws timestamp (utc, no tzinfo) for the most recent
local time before the pywws timestamp dt, with datetime replace
applied. |
def read(file, system):
"""Parse an ANDES card file into internal variables"""
try:
fid = open(file, 'r')
raw_file = fid.readlines()
except IOError:
print('* IOError while reading input card file.')
return
ret_dict = dict()
ret_dict['outfile'] = file.split('.')[0].lower() + '.py'
key, val = None, None
for idx, line in enumerate(raw_file):
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
elif '#' in line:
line = line.split('#')[0]
if '=' in line: # defining a field
key, val = line.split('=')
key, val = key.strip(), val.strip()
val = [] if val == '' else val
ret_dict.update({key: val})
if val:
val = val.split(';')
else:
val.extend(line.split(';'))
if val:
val = de_blank(val)
ret_dict[key] = val
ret_dict_ord = dict(ret_dict)
for key, val in ret_dict.items():
if not val:
continue
if type(val) == list:
if ':' in val[0]:
new_val = {} # return in a dictionary
new_val_ord = [
] # return in an ordered list with the dict keys at 0
for item in val:
try:
m, n = item.split(':')
except ValueError:
print('* Error: check line <{}>'.format(item))
return
m, n = m.strip(), n.strip()
if ',' in n:
n = n.split(',')
n = de_blank(n)
n = [to_number(i) for i in n]
else:
n = to_number(n)
new_val.update({m.strip(): n})
new_val_ord.append([m.strip(), n])
ret_dict[key] = new_val
ret_dict_ord[key] = new_val_ord
ret_dict['name'] = ret_dict['name'][0]
ret_dict['doc_string'] = ret_dict['doc_string'][0]
ret_dict['group'] = ret_dict['group'][0]
ret_dict['service_keys'] = list(ret_dict['service_eq'].keys())
ret_dict['consts'] = list(ret_dict['data'].keys()) + list(
ret_dict['service_eq'].keys())
ret_dict['init1_eq'] = ret_dict_ord['init1_eq']
ret_dict['service_eq'] = ret_dict_ord['service_eq']
ret_dict['ctrl'] = ret_dict_ord['ctrl']
copy_algebs = []
copy_states = []
for item in ret_dict['ctrl']:
key, val = item
if val[3] == 'y':
copy_algebs.append(key)
elif val[3] == 'x':
copy_states.append(key)
elif val[3] == 'c':
ret_dict['consts'].append(key)
ret_dict['copy_algebs'] = copy_algebs
ret_dict['copy_states'] = copy_states
return run(system, **ret_dict) | Parse an ANDES card file into internal variables |
def call(command, silent=False):
""" Runs a bash command safely, with shell=false, catches any non-zero
return codes. Raises slightly modified CalledProcessError exceptions
on failures.
Note: command is a string and cannot include pipes."""
try:
if silent:
with open(os.devnull, 'w') as FNULL:
return subprocess.check_call(command_to_array(command), stdout=FNULL)
else:
# Using the defaults, shell=False, no i/o redirection.
return check_call(command_to_array(command))
except CalledProcessError as e:
# We are modifying the error itself for 2 reasons. 1) it WILL contain
# login credentials when run_mongodump is run, 2) CalledProcessError is
# slightly not-to-spec (the message variable is blank), which means
# cronutils.ErrorHandler would report unlabeled stack traces.
e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode)
e.cmd = e.cmd[0] + " [arguments stripped for security]"
raise e | Runs a bash command safely, with shell=false, catches any non-zero
return codes. Raises slightly modified CalledProcessError exceptions
on failures.
Note: command is a string and cannot include pipes. |
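The command_to_array helper is not shown in this excerpt; shlex.split is the standard-library equivalent, so a hedged standalone version of the same idea looks like this:
import os
import shlex
import subprocess

def safe_call(command, silent=False):
    # shell=False: the string is tokenized and never handed to a shell,
    # which is why pipes and redirection are intentionally unsupported.
    argv = shlex.split(command)
    if silent:
        with open(os.devnull, 'w') as devnull:
            return subprocess.check_call(argv, stdout=devnull)
    return subprocess.check_call(argv)

# safe_call("echo hello", silent=True)  # returns 0 on success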
def find_mecab_dictionary(names):
"""
Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions.
"""
suggested_pkg = names[0]
paths = [
os.path.expanduser('~/.local/lib/mecab/dic'),
'/var/lib/mecab/dic',
'/var/local/lib/mecab/dic',
'/usr/lib/mecab/dic',
'/usr/local/lib/mecab/dic',
'/usr/lib/x86_64-linux-gnu/mecab/dic',
]
full_paths = [os.path.join(path, name) for path in paths for name in names]
checked_paths = [path for path in full_paths if len(path) <= MAX_PATH_LENGTH]
for path in checked_paths:
if os.path.exists(path):
return path
error_lines = [
"Couldn't find the MeCab dictionary named %r." % suggested_pkg,
"You should download or use your system's package manager to install",
"the %r package." % suggested_pkg,
"",
"We looked in the following locations:"
] + ["\t%s" % path for path in checked_paths]
skipped_paths = [path for path in full_paths if len(path) > MAX_PATH_LENGTH]
if skipped_paths:
error_lines += [
"We had to skip these paths that are too long for MeCab to find:",
] + ["\t%s" % path for path in skipped_paths]
raise OSError('\n'.join(error_lines)) | Find a MeCab dictionary with a given name. The dictionary has to be
installed separately -- see wordfreq's README for instructions. |
def plot_shade_mask(ax, ind, mask, facecolor='gray', alpha=0.5):
'''Shade across x values where boolean mask is `True`
Args
----
ax: pyplot.ax
Axes object to plot with a shaded region
ind: ndarray
The indices to use for the x-axis values of the data
mask: ndarray
Boolean mask array to determine which regions should be shaded
facecolor: matplotlib color
Color of the shaded area
Returns
-------
ax: pyplot.ax
Axes object with the shaded region added
'''
ymin, ymax = ax.get_ylim()
ax.fill_between(ind, ymin, ymax, where=mask,
facecolor=facecolor, alpha=alpha)
return ax | Shade across x values where boolean mask is `True`
Args
----
ax: pyplot.ax
Axes object to plot with a shaded region
ind: ndarray
The indices to use for the x-axis values of the data
mask: ndarray
Boolean mask array to determine which regions should be shaded
facecolor: matplotlib color
Color of the shaded area
Returns
-------
ax: pyplot.ax
Axes object with the shaded region added |
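A minimal matplotlib usage sketch with synthetic data (illustrative only):
import numpy as np
import matplotlib.pyplot as plt

ind = np.arange(200)
signal = np.sin(ind / 20.0)
mask = signal > 0.5                      # shade wherever the signal is high

fig, ax = plt.subplots()
ax.plot(ind, signal)
ymin, ymax = ax.get_ylim()
ax.fill_between(ind, ymin, ymax, where=mask, facecolor='gray', alpha=0.5)
plt.show()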
def _is_accepted(self, element_tag):
'''Return if the link is accepted by the filters.'''
element_tag = element_tag.lower()
if self._ignored_tags is not None \
and element_tag in self._ignored_tags:
return False
if self._followed_tags is not None:
return element_tag in self._followed_tags
else:
return True | Return if the link is accepted by the filters. |
def in_builddir(sub='.'):
"""
Decorate a project phase with a local working directory change.
Args:
sub: An optional subdirectory to change into.
"""
from functools import wraps
def wrap_in_builddir(func):
"""Wrap the function for the new build directory."""
@wraps(func)
def wrap_in_builddir_func(self, *args, **kwargs):
"""The actual function inside the wrapper for the new builddir."""
p = local.path(self.builddir) / sub
if not p.exists():
LOG.error("%s does not exist.", p)
if p == local.cwd:
LOG.debug("CWD already is %s", p)
return func(self, *args, **kwargs)
with local.cwd(p):
return func(self, *args, **kwargs)
return wrap_in_builddir_func
return wrap_in_builddir | Decorate a project phase with a local working directory change.
Args:
sub: An optional subdirectory to change into. |
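A generic sketch of the same decorator shape using os.chdir instead of plumbum's local.cwd context manager (illustrative; the original also logs a missing directory rather than failing):
import os
from functools import wraps

def in_directory(sub='.'):
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # Run the wrapped method inside self.builddir / sub, then restore cwd.
            target = os.path.join(self.builddir, sub)
            previous = os.getcwd()
            os.chdir(target)
            try:
                return func(self, *args, **kwargs)
            finally:
                os.chdir(previous)
        return wrapper
    return decorator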
def system(self) -> 'EFBChat':
"""
Set the chat as a system chat.
Only set for channel-level and group-level system chats.
Returns:
EFBChat: This object.
"""
self.chat_name = "System"
self.chat_alias = None
self.chat_uid = EFBChat.SYSTEM_ID
self.chat_type = ChatType.System
return self | Set the chat as a system chat.
Only set for channel-level and group-level system chats.
Returns:
EFBChat: This object. |
def _fake_openreferenceinstances(self, namespace, **params):
"""
Implements WBEM server responder for
:meth:`~pywbem.WBEMConnection.OpenReferenceInstances`
with data from the instance repository.
"""
self._validate_namespace(namespace)
self._validate_open_params(**params)
params['ObjectName'] = params['InstanceName']
del params['InstanceName']
result = self._fake_references(namespace, **params)
objects = [] if result is None else [x[2] for x in result[0][2]]
return self._open_response(objects, namespace,
'PullInstancesWithPath', **params) | Implements WBEM server responder for
:meth:`~pywbem.WBEMConnection.OpenReferenceInstances`
with data from the instance repository. |
def pre_check(self, data):
"""Count chars, words and sentences in the text."""
sentences = len(re.findall(r'[\.!?]+\W+', data)) or 1
chars = len(data) - len(re.findall(r'[^a-zA-Z0-9]', data))
num_words = len(re.findall(r'\s+', data))
data = re.split(r'[^a-zA-Z]+', data)
return data, sentences, chars, num_words | Count chars, words and sentences in the text. |
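A worked example of the three counts on a short string (same regexes, written as raw strings):
import re

data = "Hello world. This is a test!"
sentences = len(re.findall(r'[\.!?]+\W+', data)) or 1        # 1: only '. ' matches
chars = len(data) - len(re.findall(r'[^a-zA-Z0-9]', data))   # 21 alphanumeric chars
num_words = len(re.findall(r'\s+', data))                    # 5 whitespace runs
print(sentences, chars, num_words)  # 1 21 5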
def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
'''
Get Data from HDF5 files.
There are three ways to call this function
1. get(dataitem)
Fetches the dataitem for all cycles. If dataitem is a header
attribute or list of attributes then the data is returned.
If dataitem is an individual or list of column attributes,
data columns or isotopes/elements the data is returned for
all cycles.
2. get(cycle_list, dataitem)
Fetches the dataitem or list of dataitems for the cycle
or list of cycles. The variable dataitems can contain column
attributes, data columns, and isotopes/elements.
3. get(cycle_list, dataitem, isotope)
Fetches the dataitems like the second method except that
one of the dataitems must be either "iso_massf" or "yps",
and in the data returned "iso_massf" and "yps" are replaced
with the data from the isotopes. The isotopes must be in
the form given by se.isotopes or se.elements.
Parameters
----------
cycle_list : list, integer or string
If cycle_list is a list or string and all of the entries
are header attributes then the attributes are returned.
If cycle_list is a list or string of dataitems then the
dataitems are fetched for all cycles.
If cycle_list is a list, integer or string of cycle numbers
then data is returned for those cycles.
dataitem: list or string, optional
If dataitem is not None then the data for each item is
returned for the cycle or list of cycles. dataitem may be an
individual or a mixed list of column attributes, column
data or isotopes/elements. If dataitem is None then
cycle_list must be a string. The default is None.
isotope: list or string, optional
If one of the dataitems is "iso_massf" or "yps" then it is
replaced with the data from the individual isotopes/elements
listed in isotope. The default is None.
sparse : int
Implements a sparsity factor on the fetched data i.e. only
the i th cycle in cycle_list data is returned,
where i = sparse.
'''
# Check out the inputs
t1=time.time()
isotopes_of_interest = []
nested_list = False
# if one of cycle_list, dataitem or isotope is given as a string convert it to a list
if isinstance(cycle_list, basestring):
cycle_list = [cycle_list]
else:
try:
if len(cycle_list) == 1:
nested_list = True
except TypeError:
pass #leave nested_list as false
if isinstance(dataitem, basestring):
dataitem = [dataitem]
if isinstance(isotope, basestring):
isotope = [isotope]
if dataitem==None and isotope==None:
option_ind = 1
dataitem = cycle_list
if not any([item in self.hattrs for item in dataitem]):
cycle_list = self.cycles
else:
first_file = mrT.File(self.h5s[0].filename,'r')
dat = []
# get all dataitems from header attributes
for item in dataitem:
tmp = first_file.attrs.get(item, None)
try:
if len(tmp) == 1:
tmp = tmp[0]
except TypeError: #if a scaler is returned do nothing
pass
dat.append(tmp)
# if only one header attribute is required don't return as a list
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
first_file.close()
return dat
if any([item.split('-')[0] in self.isos for item in dataitem]):
return self.get(cycle_list,dataitem,sparse=sparse)
elif isotope==None:
option_ind = 2
cycle_list = cycle_list
dataitem = dataitem
# if one dataitem is given as a string convert it to a list
if isinstance(dataitem, basestring):
dataitem = [dataitem]
new_dataitem = []
new_isotopes = []
for item in dataitem:
if item.split('-')[0] in self.isos:
new_isotopes.append(item)
else:
new_dataitem.append(item)
if len(new_isotopes) != 0:
tmp = []
try:
tmp = self.get(cycle_list,new_dataitem + ['iso_massf'],new_isotopes,sparse=sparse)
except: # in some old se files there maybe still yps as the name for the abundance arrays
tmp = self.get(cycle_list,new_dataitem + ['yps'],new_isotopes,sparse=sparse)
# modify the dat list so dat is structured like dataitems
dat = []
#make sure tmp contains the data as a list of cycles
if isinstance(cycle_list, basestring):
tmp = [tmp]
else:
try:
if len(cycle_list) == 1:
tmp = [tmp]
except TypeError:
tmp = [tmp]
for cyc in tmp:
temp_dataitem = []
for item in dataitem:
if item in new_dataitem:
temp_dataitem.append(cyc[new_dataitem.index(item)])
else:
if len(new_dataitem) == 0:
temp_dataitem = cyc
else:
if len(new_isotopes) == 1:
temp_dataitem.append(cyc[-1])
else:
temp_dataitem.append(cyc[-1][new_isotopes.index(item)])
dat.append(temp_dataitem)
if (len(dat) == 1) and (not nested_list):
dat = dat[0]
return dat
else:
# there is an implicit rule here that if you want 2D arrays you have
# to give 3 args, or, in other words you have to give a cycle or cycle
# array; there is no good reason for that, except the programmer's
# laziness
option_ind = 3
cycle_list = cycle_list
dataitem = dataitem
isotopes_of_interest = isotope
# we need to find out the shellnb to know if any yps array may just be
# a one row array, as - for example- in the surf.h5 files
# SJONES: I think here we only need to look at the first shellnb(!)
#shellnb=self.get(cycle_list,'shellnb')
try: #check if cycle_list is not a list
cycle_list[0]
except (TypeError,IndexError):
cycle_list = [cycle_list]
shellnb=self.get(cycle_list[0],'shellnb')
if sparse <1:
sparse=1
# Just in case the user inputs integers
try:
for x in range(len(cycle_list)):
cycle_list[x] = str(cycle_list[x])
except TypeError:
cycle_list = [str(cycle_list)]
if option_ind != 1:
try: #if it is a single cycle make sure its formatted correctly
if cycle_list.isdigit():
cycle_list = [cycle_list]
for cycle in cycle_list:
if len(cycle) != len(self.cycles[0]):
#print "a"
diff = len(self.cycles[0])-len(cycle)
OO = ''
while diff >=1:
OO+='0'
cycle = OO+cycle
except AttributeError: ##if it is a list of cycles make sure its formatted correctly
if cycle_list[0].isdigit():
for x in range(len(cycle_list)):
if len(str(cycle_list[x])) != len(str(self.cycles[0])):
#print "b"
diff = len(str(self.cycles[0]))-len(str(cycle_list[x]))
OO = ''
while diff >=1:
OO+='0'
diff-=1
try:
cycle_list[x] = OO+cycle_list[x]
except TypeError:
cycle_list[0] = OO+cycle_list[0]
dat = []
cycle_list.sort()
cyclelist=np.array(list(map(int, cycle_list)))
# cycles_requested is a list of indices from cyclelist
# The index of the largest and smallest indices should be stored
# in sorted order. As new requests are made, if the requests
# border or overlap then only keep the index of the largest and
# smallest indices.
cycles_requested = []
# Sometimes bad data or last restart.h5 files contain no cycles,
# causing the code to crash. Do a simple try/except here:
file_min=[]
file_max=[]
try:
for h5 in self.h5s:
file_min.append(int(h5.cycle[0]))
file_max.append(int(h5.cycle[-1]))
except IndexError:
print('File '+h5.filename+' contains no data, please remove or rename it')
print('Once the file has been removed or renamed, the preprocessor file must be re-written. Do this by either removing the file h5Preproc.txt from the data directory or by invoking the se instance with rewrite=True')
print('At present, h5T cannot check for empty files since the overhead using the mounted VOSpace would be too great.')
raise IOError('Cycle-less file encountered')
file_min.sort()
file_max.sort()
for h5 in self.h5s:
#initialize file metadata
min_file = int(h5.cycle[0])
max_file = int(h5.cycle[-1])
min_list = int(cyclelist[0])
max_list = int(cyclelist[-1])
index_min = None #if None start at begining
index_max = None #if None finish at end
# SJONES Now we need to add the case that the set only contains one file:
if len(file_min) == 1:
min_file = min_list - 1
max_file = max_list + 1
else:
file_index = file_min.index(min_file)
if file_index == 0:
if min_list - 1 < min_file:
min_file = min_list - 1
max_file = (file_min[file_index + 1] + max_file)//2
elif file_index == len(file_min) - 1:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
if max_list + 1 > max_file:
max_file = max_list + 1
else:
min_file = (file_max[file_index - 1] + min_file)//2 + 1
max_file = (file_min[file_index + 1] + max_file)//2
# calculate the left and right limits of the intersection
# of the lists h5.cycle and cyclelist
if (max_list < min_file) or (max_file < min_list):
# the lists do not intersect
continue
elif (min_list <= min_file) and (max_file <= max_list):
# all of h5.cycle is within cyclelist
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = bisect.bisect_right(cyclelist, max_file)
elif (min_file <= min_list) and (max_list <= max_file):
# all of cyclelist is within h5.cycle
index_min = None
index_max = None
else:
if min_list > min_file:
# cyclelist overlaps the right edge of h5.cycle
index_min = None
index_max = bisect.bisect_right(cyclelist, max_file)
else:
# cyclelist overlaps the left edge of h5.cycle
index_min = bisect.bisect_left(cyclelist, min_file)
index_max = None
# maintain a list of all requested cycles by keeping track of
# the maximum and minimum indices
imin = index_min
if index_min == None:
imin = 0
imax = index_max
if index_max == None:
imax = len(cyclelist)
request_min = bisect.bisect_left(cycles_requested, imin)
request_max = bisect.bisect_right(cycles_requested, imax)
# if the new request overlaps an older request remove them
del cycles_requested[request_min:request_max]
if ((request_max-request_min) % 2) ==1:
# new and old requests overlapped on one edge only
if request_min % 2 == 0:
# add new starting index
cycles_requested.insert(request_min, imin)
else:
# add new ending index
cycles_requested.insert(request_min, imax)
else:
# new and old requests overlapped on two edges
if request_min % 2 == 0:
# old request was contained within new request
cycles_requested.insert(request_min, imin)
cycles_requested.insert(request_min + 1, imax)
else:
# new request was contained within old request
pass
if not self.h5sStarted[self.h5s.index(h5)]:
h5.start()
h5.join()
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
self.h5sStarted[self.h5s.index(h5)]=True
else:
temp = h5.fetch_data_sam(dataitem,cycle_list[index_min:index_max],len(cycle_list),len(dat))
temp_dat = []
for temp_num, temp_cycle in enumerate(temp):
temp_dataforcycle = []
for dataitem_num, temp_dataitem in enumerate(temp_cycle):
# identify what cycle the temp data was collected from
temp_dataitem=self.red_dim(temp_dataitem)
# if option_ind == 3 and isotopes_of_interest != []:
if (dataitem[dataitem_num] == 'iso_massf' or dataitem[dataitem_num] == 'yps') and isotopes_of_interest != []:
# Figure out the index
index = []
iso_tmp = []
if 'iso' in dataitem[dataitem_num]: #if we are looking at an isotope
iso_tmp = self.isotopes
else:
iso_tmp = self.elements
for iso in isotopes_of_interest: #finds the location of the isotope
x = iso_tmp.index(iso)
index.append(x)
if index == []:
# if none of the isotopes of interest are found
# then the index defaults to [0], so that the loop
# will still try to acess the data in t.
index = [0]
islist=True
if len(cycle_list)==1:
islist=False
# shellnb_index = 0
# if index_min == None:
# shellnb_index = temp_num
# else:
# shellnb_index = index_min + temp_num
temp_multicyc = []
for i in index:
# if islist:
# if shellnb[shellnb_index] == 1: # again take care of 1-row 2D arrays
if shellnb == 1: # again take care of 1-row 2D arrays
temp_multicyc.append(temp_dataitem[i])
else:
temp_multicyc.append(temp_dataitem[:,i])
# else:
# if shellnb == 1: # again take care of 1-row 2D arrays
# temp_multicyc.append(temp_dataitem[i])
# else:
# temp_multicyc.append(temp_dataitem[:,i])
if len(temp_multicyc) == 1: # again take care of 1-row arrays
temp_multicyc = temp_multicyc[0]
temp_dataitem = temp_multicyc
temp_dataforcycle.append(temp_dataitem)
if len(temp_dataforcycle) == 1: # again take care of 1-row arrays
temp_dataforcycle = temp_dataforcycle[0]
# Now add the information to the list we pass back
temp_dat.append(temp_dataforcycle)
# calculate the proper insertion point for the data collected from
# the file h5 in self.h5s
insert_pnt = 0
if index_min is not None: #alex: in py2: x < None == False
for i in range(len(cycles_requested)):
if i % 2 == 1:
if cycles_requested[i] < index_min:
insert_pnt += cycles_requested[i] - cycles_requested[i-1]
elif cycles_requested[i - 1] < index_min:
insert_pnt += index_min - cycles_requested[i - 1]
# insert the cycle data from the current file into the appropriate place
# in the output data.
dat[insert_pnt:insert_pnt] = temp_dat
#check if cycles were not requested from the file
# SJONES comment
# missing_cycles = np.array([])
# if len(cycles_requested) != 2:
# if len(cycles_requested) == 0:
# missing_cycles = np.array([cycle_list])
# else:
# cycles_requested = [None] + cycles_requested + [None]
# for i in xrange(0, len(cycles_requested), 2):
# min = cycles_requested[i]
# max = cycles_requested[i + 1]
# missing_cycles = np.append(missing_cycles, cycle_list[min:max])
# print "The requested cycles: " + str(missing_cycles) + " are not available in this data set"
# elif (cycles_requested[0] != 0) or (cycles_requested[1] != len(cyclelist)):
# min = cycles_requested[0]
# max = cycles_requested[1]
# missing_cycles = np.append(missing_cycles, cycle_list[0:min])
# missing_cycles = np.append(missing_cycles, cycle_list[max:])
# print "The requested cycles: " + str(missing_cycles) + " are not available in this data set"
if len(dat) < 2 and option_ind != 3 and (not nested_list):
try:
dat = dat[0]
except IndexError:
None
except TypeError:
None
try:
if len(dat) < 2 and isotopes_of_interest != []:
dat = dat[0]
except TypeError:
None
except IndexError:
None
t2=time.time()
return dat | Get Data from HDF5 files.
There are three ways to call this function
1. get(dataitem)
Fetches the dataitem for all cycles. If dataitem is a header
attribute or list of attributes then the data is returned.
If dataitem is an individual or list of column attributes,
data columns or isotopes/elements the data is returned for
all cycles.
2. get(cycle_list, dataitem)
Fetches the dataitem or list of dataitems for the cycle
or list of cycles. The variable dataitems can contain column
attributes, data columns, and isotopes/elements.
3. get(cycle_list, dataitem, isotope)
Fetches the dataitems like the second method except that
one of the dataitems must be either "iso_massf" or "yps",
and in the data returned "iso_massf" and "yps" are replaced
with the data from the isotopes. The isotopes must be in
the form given by se.isotopes or se.elements.
Parameters
----------
cycle_list : list, integer or string
If cycle_list is a list or string and all of the entries
are header attributes then the attributes are returned.
If cycle_list is a list or string of dataitems then the
dataitems are fetched for all cycles.
If cycle_list is a list, integer or string of cycle numbers
then data is returned for those cycles.
dataitem: list or string, optional
If dataitem is not None then the data for each item is
returned for the cycle or list of cycles. dataitem may be an
individual or a mixed list of column attributes, column
data or isotopes/elements. If dataitem is None then
cycle_list must be a string. The default is None.
isotope: list or string, optional
If one of the dataitems is "iso_massf" or "yps" then it is
replaced with the data from the individual isotopes/elements
listed in isotope. The default is None.
sparse : int
Implements a sparsity factor on the fetched data i.e. only
the i th cycle in cycle_list data is returned,
where i = sparse. |
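The core of the per-file selection above is intersecting the sorted list of requested cycles with each file's [min_file, max_file] range via bisect; that step in isolation, with made-up cycle numbers:
import bisect

requested = [100, 150, 200, 250, 300, 350]   # sorted cycle numbers asked for
min_file, max_file = 180, 320                # cycle range covered by one HDF5 file

index_min = bisect.bisect_left(requested, min_file)
index_max = bisect.bisect_right(requested, max_file)
print(requested[index_min:index_max])        # [200, 250, 300] served by this file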
def __recognize_scalar(self, node: yaml.Node,
expected_type: Type) -> RecResult:
"""Recognize a node that we expect to be a scalar.
Args:
node: The node to recognize.
expected_type: The type it is expected to be.
Returns:
A list of recognized types and an error message
"""
logger.debug('Recognizing as a scalar')
if (isinstance(node, yaml.ScalarNode)
and node.tag == scalar_type_to_tag[expected_type]):
return [expected_type], ''
message = 'Failed to recognize a {}\n{}\n'.format(
type_to_desc(expected_type), node.start_mark)
return [], message | Recognize a node that we expect to be a scalar.
Args:
node: The node to recognize.
expected_type: The type it is expected to be.
Returns:
A list of recognized types and an error message |
def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the TCP health monitor plugin.
"""
parser.add_argument('--tcp_check_interval',
dest='tcp_check_interval',
required=False, default=2, type=float,
help="TCP health-test interval in seconds, "
"default 2 "
"(only for 'tcp' health monitor plugin)")
parser.add_argument('--tcp_check_port',
dest='tcp_check_port',
required=False, default=22, type=int,
help="Port for TCP health-test, default 22 "
"(only for 'tcp' health monitor plugin)")
return ["tcp_check_interval", "tcp_check_port"] | Arguments for the TCP health monitor plugin. |
def generate(self, information, timeout=-1):
"""
Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.
Args:
information (dict): Information to generate the certificate for RabbitMQ clients.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: RabbitMQ certificate generated
"""
return self._client.create(information, timeout=timeout) | Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.
Args:
information (dict): Information to generate the certificate for RabbitMQ clients.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: RabbitMQ certificate generated |
def pull_guest_properties(self):
"""Get the list of the guest properties matching a set of patterns along
with their values, timestamps and flags and give responsibility for
managing properties to the console.
out names of type str
The names of the properties returned.
out values of type str
The values of the properties returned. The array entries match the
corresponding entries in the @a name array.
out timestamps of type int
The timestamps of the properties returned. The array entries match
the corresponding entries in the @a name array.
out flags of type str
The flags of the properties returned. The array entries match the
corresponding entries in the @a name array.
"""
(names, values, timestamps, flags) = self._call("pullGuestProperties")
return (names, values, timestamps, flags) | Get the list of the guest properties matching a set of patterns along
with their values, timestamps and flags and give responsibility for
managing properties to the console.
out names of type str
The names of the properties returned.
out values of type str
The values of the properties returned. The array entries match the
corresponding entries in the @a name array.
out timestamps of type int
The timestamps of the properties returned. The array entries match
the corresponding entries in the @a name array.
out flags of type str
The flags of the properties returned. The array entries match the
corresponding entries in the @a name array. |
def compile_mof_string(self, mof_str, namespace=None, search_paths=None,
verbose=None):
"""
Compile the MOF definitions in the specified string and add the
resulting CIM objects to the specified CIM namespace of the mock
repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_str (:term:`string`):
A string with the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository.
"""
namespace = namespace or self.default_namespace
# if not self._validate_namespace(namespace): TODO
# self.add_namespace(namespace)
self._validate_namespace(namespace)
mofcomp = MOFCompiler(_MockMOFWBEMConnection(self),
search_paths=search_paths,
verbose=verbose)
mofcomp.compile_string(mof_str, namespace) | Compile the MOF definitions in the specified string and add the
resulting CIM objects to the specified CIM namespace of the mock
repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_str (:term:`string`):
A string with the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository. |
def setup_database(config_data):
"""
Run the migrate command to create the database schema
:param config_data: configuration data
"""
with chdir(config_data.project_directory):
env = deepcopy(dict(os.environ))
env[str('DJANGO_SETTINGS_MODULE')] = str('{0}.settings'.format(config_data.project_name))
env[str('PYTHONPATH')] = str(os.pathsep.join(map(shlex_quote, sys.path)))
commands = []
commands.append(
[sys.executable, '-W', 'ignore', 'manage.py', 'migrate'],
)
if config_data.verbose:
sys.stdout.write(
'Database setup commands: {0}\n'.format(
', '.join([' '.join(cmd) for cmd in commands])
)
)
for command in commands:
try:
output = subprocess.check_output(
command, env=env, stderr=subprocess.STDOUT
)
sys.stdout.write(output.decode('utf-8'))
except subprocess.CalledProcessError as e: # pragma: no cover
if config_data.verbose:
sys.stdout.write(e.output.decode('utf-8'))
raise
if not config_data.no_user:
sys.stdout.write('Creating admin user\n')
if config_data.noinput:
create_user(config_data)
else:
subprocess.check_call(' '.join(
[sys.executable, '-W', 'ignore', 'manage.py', 'createsuperuser']
), shell=True, stderr=subprocess.STDOUT) | Run the migrate command to create the database schema
:param config_data: configuration data |
def filter_update(self, id, phrase = None, context = None, irreversible = None, whole_word = None, expires_in = None):
"""
Updates the filter with the given `id`. Parameters are the same
as in `filter_create()`.
Returns the `filter dict`_ of the updated filter.
"""
id = self.__unpack_id(id)
params = self.__generate_params(locals(), ['id'])
url = '/api/v1/filters/{0}'.format(str(id))
return self.__api_request('PUT', url, params) | Updates the filter with the given `id`. Parameters are the same
as in `filter_create()`.
Returns the `filter dict`_ of the updated filter. |
def requires_user(fn):
"""
Requires that the calling Subject be *either* authenticated *or* remembered
via RememberMe services before allowing access.
This method essentially ensures that subject.identifiers IS NOT None
:raises UnauthenticatedException: indicating that the decorated method is
not allowed to be executed because the
Subject attempted to perform a user-only
operation
"""
@functools.wraps(fn)
def wrap(*args, **kwargs):
subject = Yosai.get_current_subject()
if subject.identifiers is None:
msg = ("Attempting to perform a user-only operation. The "
"current Subject is NOT a user (they haven't been "
"authenticated or remembered from a previous login). "
"ACCESS DENIED.")
raise UnauthenticatedException(msg)
return fn(*args, **kwargs)
return wrap | Requires that the calling Subject be *either* authenticated *or* remembered
via RememberMe services before allowing access.
This method essentially ensures that subject.identifiers IS NOT None
:raises UnauthenticatedException: indicating that the decorated method is
not allowed to be executed because the
Subject attempted to perform a user-only
operation |
def find_dups(file_dict):
'''takes output from :meth:`scan_dir` and returns list of duplicate files'''
found_hashes = {}
for f in file_dict:
if file_dict[f]['md5'] not in found_hashes:
found_hashes[file_dict[f]['md5']] = []
found_hashes[file_dict[f]['md5']].append(f)
final_hashes = dict(found_hashes)
for h in found_hashes:
if len(found_hashes[h])<2:
del(final_hashes[h])
return final_hashes.values() | takes output from :meth:`scan_dir` and returns list of duplicate files |
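A standalone hedged variant that computes the hashes itself with hashlib, instead of relying on the md5 values that scan_dir has already stored (the file names in the comment are hypothetical):
import hashlib

def find_duplicate_files(paths):
    # Group paths by MD5 digest and keep only groups with two or more members.
    by_hash = {}
    for path in paths:
        with open(path, 'rb') as handle:
            digest = hashlib.md5(handle.read()).hexdigest()
        by_hash.setdefault(digest, []).append(path)
    return [group for group in by_hash.values() if len(group) > 1]

# find_duplicate_files(['a.txt', 'b.txt', 'copy_of_a.txt'])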
def refresh(self):
"""Refresh the cache by deleting the old one and creating a new one.
"""
if self.exists:
self.delete()
self.populate()
self.open() | Refresh the cache by deleting the old one and creating a new one. |
def prepare_dispatch(self):
# pylint:disable=too-many-branches, too-many-statements, too-many-locals
"""
Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
pollers)
This function will only prepare something if self.new_to_dispatch is False
It will reset the first_dispatch_done flag
A DispatcherError exception is raised if a configuration is already prepared! Unset the
new_to_dispatch flag before calling!
:return: None
"""
if self.new_to_dispatch:
raise DispatcherError("A configuration is already prepared!")
# So we are preparing a new dispatching...
self.new_to_dispatch = True
self.first_dispatch_done = False
# Update Alignak name for all the satellites
for daemon_link in self.all_daemons_links:
daemon_link.cfg.update({'alignak_name': self.alignak_conf.alignak_name})
logger.info("Preparing realms dispatch:")
# Prepare the arbiters configuration
master_arbiter_cfg = arbiters_cfg = {}
for arbiter_link in self.get_satellites_list('arbiters'):
# # If not me and not a spare arbiter...
# if arbiter_link == self.arbiter_link:
# # I exclude myself from the dispatching, I have my configuration ;)
# continue
if not arbiter_link.active:
# I exclude the daemons that are not active
continue
arbiter_cfg = arbiter_link.cfg
arbiter_cfg.update({
'managed_hosts_names': [h.get_name() for h in self.alignak_conf.hosts],
'modules': serialize(arbiter_link.modules, True),
'managed_conf_id': self.alignak_conf.instance_id,
'push_flavor': ''
})
# Hash the configuration
cfg_string = json.dumps(arbiter_cfg, sort_keys=True).encode('utf-8')
arbiter_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()
# Update the arbiters list, but do not include the whole conf
arbiters_cfg[arbiter_link.uuid] = arbiter_cfg['self_conf']
# Not for the master arbiter...
if arbiter_link != self.arbiter_link:
arbiter_cfg.update({
'arbiters': master_arbiter_cfg,
'whole_conf': self.alignak_conf.spare_arbiter_conf,
})
# Hash the whole configuration
try:
s_conf_part = json.dumps(arbiter_cfg, sort_keys=True).encode('utf-8')
except UnicodeDecodeError:
pass
arbiter_cfg['hash'] = hashlib.sha1(s_conf_part).hexdigest()
# Dump the configuration part size
pickled_conf = pickle.dumps(arbiter_cfg)
logger.info(' arbiter configuration size: %d bytes', sys.getsizeof(pickled_conf))
# The configuration is assigned to the arbiter
# todo: perhaps this should be done in the realms (like schedulers and satellites)?
arbiter_link.cfg = arbiter_cfg
arbiter_link.cfg_to_manage = self.alignak_conf
arbiter_link.push_flavor = arbiter_cfg['push_flavor']
arbiter_link.hash = arbiter_cfg['hash']
arbiter_link.need_conf = False
arbiter_link.configuration_sent = False
# If not me and not a spare arbiter...
if arbiter_link == self.arbiter_link:
# The master arbiter configuration for the other satellites
master_arbiter_cfg = {self.arbiter_link.uuid: arbiter_cfg['self_conf']}
logger.info(' arbiter configuration prepared for %s', arbiter_link.name)
# main_realm = self.alignak_conf.realms.find_by_name('All')
# all_realms = main_realm.all_sub_members
# for realm_uuid in all_realms:
# realm = self.alignak_conf.realms[realm_uuid]
# logger.info("- realm %s: %s", realm_uuid, realm)
for realm in self.alignak_conf.realms:
logger.info("- realm %s: %d configuration part(s)", realm.name, len(realm.parts))
# parts_to_dispatch is a list of configuration parts built when
# the configuration is split into parts for the realms and their schedulers
# Only get the parts that are not yet assigned to a scheduler
parts_to_dispatch = [cfg for cfg in list(realm.parts.values()) if not cfg.is_assigned]
if not parts_to_dispatch:
logger.info(' no configuration to dispatch for this realm!')
continue
logger.info(" preparing the dispatch for schedulers:")
# Now we get all the schedulers of this realm and upper
# schedulers = self.get_scheduler_ordered_list(realm)
schedulers = realm.get_potential_satellites_by_type(
self.get_satellites_list('schedulers'), 'scheduler')
if not schedulers:
logger.error(' no available schedulers in this realm (%s)!', realm)
continue
logger.info(" realm schedulers: %s",
','.join([s.get_name() for s in schedulers]))
for cfg_part in parts_to_dispatch:
logger.info(" .assigning configuration part %s (%s), name:%s",
cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
# we need to loop until the configuration part is assigned to a scheduler
# or no more scheduler is available
while True:
try:
scheduler_link = schedulers.pop()
except IndexError: # No more schedulers.. not good, no loop
                    # The configuration part does not need to be dispatched anymore
# todo: should be managed inside the Realm class!
logger.error("No more scheduler link: %s", realm)
for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
realm.to_satellites[sat_type][cfg_part.instance_id] = None
realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = \
False
realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
break
# if scheduler_link.manage_sub_realms:
# logger.warning('[%s] The scheduler %s is configured to manage sub realms.'
# ' This is not yet possible, sorry!',
# realm.name, scheduler_link.name)
# scheduler_link.manage_sub_realms = False
# continue
if not scheduler_link.need_conf:
                    logger.info('[%s] The scheduler %s does not need any configuration, sorry',
                                realm.name, scheduler_link.name)
continue
logger.debug(" preparing configuration part '%s' for the scheduler '%s'",
cfg_part.instance_id, scheduler_link.name)
logger.debug(" - %d hosts, %d services",
len(cfg_part.hosts), len(cfg_part.services))
# Serialization and hashing
s_conf_part = serialize(realm.parts[cfg_part.instance_id])
try:
s_conf_part = s_conf_part.encode('utf-8')
except UnicodeDecodeError:
pass
cfg_part.push_flavor = hashlib.sha1(s_conf_part).hexdigest()
# We generate the scheduler configuration for the satellites:
# ---
sat_scheduler_cfg = scheduler_link.give_satellite_cfg()
sat_scheduler_cfg.update({
'managed_hosts_names': [h.get_name() for h in cfg_part.hosts],
'managed_conf_id': cfg_part.instance_id,
'push_flavor': cfg_part.push_flavor
})
# Generate a configuration hash
cfg_string = json.dumps(sat_scheduler_cfg, sort_keys=True).encode('utf-8')
sat_scheduler_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()
logger.debug(' satellite scheduler configuration: %s', sat_scheduler_cfg)
for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
realm.to_satellites[sat_type][cfg_part.instance_id] = sat_scheduler_cfg
realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
# ---
scheduler_link.cfg.update({
# Global instance configuration
'instance_id': scheduler_link.instance_id,
'instance_name': scheduler_link.name,
'schedulers': {scheduler_link.uuid: sat_scheduler_cfg},
'arbiters': arbiters_cfg if scheduler_link.manage_arbiters else {},
'satellites': realm.get_links_for_a_scheduler(self.pollers,
self.reactionners,
self.brokers),
'modules': serialize(scheduler_link.modules, True),
'conf_part': serialize(realm.parts[cfg_part.instance_id]),
'managed_conf_id': cfg_part.instance_id,
'push_flavor': cfg_part.push_flavor,
'override_conf': scheduler_link.get_override_configuration()
})
# Hash the whole configuration
cfg_string = json.dumps(scheduler_link.cfg, sort_keys=True).encode('utf-8')
scheduler_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()
# Dump the configuration part size
pickled_conf = pickle.dumps(scheduler_link.cfg)
logger.info(" scheduler configuration size: %d bytes",
sys.getsizeof(pickled_conf))
logger.info(" scheduler satellites:")
satellites = realm.get_links_for_a_scheduler(self.pollers,
self.reactionners,
self.brokers)
for sat_type in satellites:
logger.info(" - %s", sat_type)
for sat_link_uuid in satellites[sat_type]:
satellite = satellites[sat_type][sat_link_uuid]
logger.info(" %s", satellite['name'])
# The configuration part is assigned to a scheduler
cfg_part.is_assigned = True
cfg_part.scheduler_link = scheduler_link
scheduler_link.cfg_to_manage = cfg_part
scheduler_link.push_flavor = cfg_part.push_flavor
scheduler_link.hash = scheduler_link.cfg['hash']
scheduler_link.need_conf = False
scheduler_link.configuration_sent = False
logger.info(' configuration %s (%s) assigned to %s',
cfg_part.instance_id, cfg_part.push_flavor, scheduler_link.name)
# The configuration part is assigned to a scheduler, no need to go further ;)
break
logger.info(" preparing the dispatch for satellites:")
for cfg_part in list(realm.parts.values()):
logger.info(" .configuration part %s (%s), name:%s",
cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
if cfg_part.instance_id not in realm.to_satellites_need_dispatch[sat_type]:
logger.warning(" nothing to dispatch for %ss", sat_type)
return
if not realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id]:
logger.warning(" no need to dispatch to %ss", sat_type)
return
# Get the list of the concerned satellites
satellites = realm.get_potential_satellites_by_type(self.satellites, sat_type)
if satellites:
logger.info(" realm %ss: %s",
sat_type, ','.join([s.get_name() for s in satellites]))
else:
logger.info(" no %s satellites", sat_type)
                # Now we dispatch the cfg to everyone that asks for it
nb_cfg_prepared = 0
for sat_link in satellites:
if not sat_link.active:
# I exclude the daemons that are not active
continue
if nb_cfg_prepared > realm.get_nb_of_must_have_satellites(sat_type):
logger.warning("Too much configuration parts prepared "
"for the expected satellites count. "
"Realm: %s, satellite: %s - prepared: %d out of %d",
realm.name, sat_link.name, nb_cfg_prepared,
realm.get_nb_of_must_have_satellites(sat_type))
# Fred - 2018-07-20 - temporary disable this error raising!
# raise DispatcherError("Too much configuration parts prepared "
# "for the expected satellites count. "
# "This should never happen!")
logger.info(" preparing configuration part '%s' for the %s '%s'",
cfg_part.instance_id, sat_type, sat_link.name)
sat_link.cfg.update({
# Global instance configuration
'arbiters': arbiters_cfg if sat_link.manage_arbiters else {},
'modules': serialize(sat_link.modules, True),
'managed_conf_id': 'see_my_schedulers',
'global_conf': self.global_conf
})
sat_link.cfg['schedulers'].update({
cfg_part.uuid: realm.to_satellites[sat_type][cfg_part.instance_id]})
# Brokers should have pollers and reactionners links too
if sat_type == "broker":
sat_link.cfg.update({'satellites': realm.get_links_for_a_broker(
self.pollers, self.reactionners, self.receivers,
self.alignak_conf.realms, sat_link.manage_sub_realms)})
# Hash the whole configuration
cfg_string = json.dumps(sat_link.cfg, sort_keys=True).encode('utf-8')
sat_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()
# Dump the configuration part size
pickled_conf = pickle.dumps(sat_link.cfg)
logger.info(' %s configuration size: %d bytes',
sat_type, sys.getsizeof(pickled_conf))
# The configuration part is assigned to a satellite
sat_link.cfg_to_manage = cfg_part
sat_link.push_flavor = cfg_part.push_flavor
sat_link.hash = sat_link.cfg['hash']
sat_link.need_conf = False
sat_link.configuration_sent = False
logger.info(' configuration %s (%s) assigned to %s',
cfg_part.instance_id, cfg_part.push_flavor, sat_link.name)
nb_cfg_prepared += 1
realm.to_satellites_managed_by[sat_type][
cfg_part.instance_id].append(sat_link)
                    # I've got enough satellites, the next ones are not needed!
if nb_cfg_prepared == realm.get_nb_of_must_have_satellites(sat_type):
logger.info(" no more %s needed in this realm.", sat_type)
realm.to_satellites_need_dispatch[sat_type][
cfg_part.instance_id] = False
nb_missed = len([cfg for cfg in list(
self.alignak_conf.parts.values()) if not cfg.is_assigned])
if nb_missed > 0:
logger.warning("Some configuration parts are not dispatched, %d are missing", nb_missed)
else:
logger.info("All configuration parts are assigned "
"to schedulers and their satellites :)")
    # Schedulers that did not get a configuration part in an otherwise OK dispatch do not need one,
    # so they will not raise dispatching errors when they are not used
for scheduler_link in self.schedulers:
if not scheduler_link.cfg_to_manage:
# "so it do not ask anymore for conf"
            logger.warning('The scheduler %s does not need a configuration!', scheduler_link.name)
scheduler_link.need_conf = False | Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
pollers)
This function will only prepare something if self.new_to_dispatch is False
It will reset the first_dispatch_done flag
A DispatcherError exception is raised if a configuration is already prepared! Unset the
new_to_dispatch flag before calling!
:return: None |
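The fingerprinting step above recurs for arbiters, schedulers and satellites alike: serialize the configuration deterministically, hash it, and store the digest so the daemons can detect whether their configuration changed. A minimal sketch of that step in isolation (the keys below are illustrative):

import hashlib
import json

cfg = {'instance_name': 'scheduler-master', 'managed_conf_id': 1, 'push_flavor': ''}
# Deterministic serialization (sort_keys) so identical content always hashes the same
cfg_string = json.dumps(cfg, sort_keys=True).encode('utf-8')
cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()
print(cfg['hash'])  # stable for identical content, changes when the config changes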
def _commit_handler(self, cmd):
"""
Special handler for hostname change on commit operation. Also handles username removal
which prompts for confirmation (username removal prompts for each user...)
"""
current_prompt = self.device.find_prompt().strip()
terminating_char = current_prompt[-1]
# Look for trailing pattern that includes '#' and '>'
pattern1 = r"[>#{}]\s*$".format(terminating_char)
# Handle special username removal pattern
pattern2 = r".*all username.*confirm"
patterns = r"(?:{}|{})".format(pattern1, pattern2)
output = self.device.send_command_expect(cmd, expect_string=patterns)
loop_count = 50
new_output = output
for i in range(loop_count):
if re.search(pattern2, new_output):
# Send confirmation if username removal
new_output = self.device.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += new_output
else:
break
# Reset base prompt in case hostname changed
self.device.set_base_prompt()
return output | Special handler for hostname change on commit operation. Also handles username removal
which prompts for confirmation (username removal prompts for each user...) |
def _fix_permissions(self):
"""
    Because docker runs as root, we need to fix permissions and ownership to allow the user to
    interact with it from their filesystem and do operations like deleting files
"""
state = yield from self._get_container_state()
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
name=self._name, image=self._image, path=volume))
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker",
"exec",
self._cid,
"/gns3/bin/busybox",
"sh",
"-c",
"("
"/gns3/bin/busybox find \"{path}\" -depth -print0"
" | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\""
")"
" && /gns3/bin/busybox chmod -R u+rX \"{path}\""
" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
        yield from process.wait() | Because docker runs as root, we need to fix permissions and ownership to allow the user to
interact with it from their filesystem and do operations like deleting files |
def pad_to(unpadded, target_len):
"""
Pad a string to the target length in characters, or return the original
string if it's longer than the target length.
"""
under = target_len - len(unpadded)
if under <= 0:
return unpadded
return unpadded + (' ' * under) | Pad a string to the target length in characters, or return the original
string if it's longer than the target length. |
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator."""
with tf.variable_scope("discriminator"):
net = layers().Conv2D(
filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net | A simple single-layer convolutional discriminator. |
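A self-contained approximation assuming TensorFlow 2: the tensor2tensor-style layers() and mean_with_attention helpers used above are replaced with plain Keras calls and a simple spatial mean (the pure_mean branch), so this is a sketch rather than the original implementation:

import tensorflow as tf

def simple_discriminator(x, filters=128, kernel_size=8, strides=4):
    # One convolution, then a spatial mean over height and width (pure_mean branch)
    net = tf.keras.layers.Conv2D(filters, kernel_size, strides=strides,
                                 padding="same", name="conv1")(x)
    return tf.reduce_mean(net, axis=[1, 2])

images = tf.random.normal([4, 32, 32, 3])
print(simple_discriminator(images).shape)  # (4, 128)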
def _histogram_data(iterator):
""" Yields only the row contents that contain the histogram entries """
histogram_started = False
header_passed = False
for l in iterator:
if '## HISTOGRAM' in l:
histogram_started = True
elif histogram_started:
if header_passed:
values = l.rstrip().split("\t")
problem_type, name = values[0].split(':')
yield problem_type, name, int(values[1])
elif l.startswith('Error Type'):
header_passed = True | Yields only the row contents that contain the histogram entries |
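A usage sketch with made-up input lines in the expected layout (a '## HISTOGRAM' marker, an 'Error Type' header, then tab-separated 'TYPE:NAME<TAB>count' rows); the names and counts are illustrative:

# Illustrative report lines; only the rows after the header are yielded.
sample_report = [
    "## METRICS CLASS\tSomeMetrics\n",
    "## HISTOGRAM\tjava.lang.String\n",
    "Error Type\tCount\n",
    "ERROR:EXAMPLE_PROBLEM\t12\n",
    "WARNING:ANOTHER_PROBLEM\t3\n",
]
for problem_type, name, count in _histogram_data(iter(sample_report)):
    print(problem_type, name, count)
# ERROR EXAMPLE_PROBLEM 12
# WARNING ANOTHER_PROBLEM 3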
def _compute_nfps_uniform(cum_counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
        cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
    # All u and l are inclusive bounds for intervals.
# Compute p = 1, the NFPs
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps | Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1. |
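A usage sketch only: _compute_nfp_uniform lives elsewhere in the same module, so a trivial placeholder stands in for it here just to show shapes and calling convention; the numbers are made up:

import numpy as np

# Placeholder for the module's _compute_nfp_uniform, used only so this sketch runs;
# the real function computes the expected false positives for the interval [l, u].
def _compute_nfp_uniform(l, u, cum_counts, sizes):
    return float(cum_counts[u] - (cum_counts[l - 1] if l > 0 else 0))

sizes = np.array([1, 2, 4, 8])
cum_counts = np.array([10, 25, 40, 60])  # cumulative counts of sets up to each size
nfps = _compute_nfps_uniform(cum_counts, sizes)
print(nfps.shape)  # (4, 4); nfps[l, u] covers the size interval [l, u], l <= u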
def discovery_print(pkt):
"""Scandevice callback. Register src mac to avoid src repetition.
Print device on screen.
:param scapy.packet.Packet pkt: Scapy Packet
:return: None
"""
if pkt.src in mac_id_list:
return
mac_id_list.append(pkt.src)
text = pkt_text(pkt)
click.secho(text, fg='magenta') if 'Amazon' in text else click.echo(text) | Scandevice callback. Register src mac to avoid src repetition.
Print device on screen.
:param scapy.packet.Packet pkt: Scapy Packet
:return: None |
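A possible wiring with scapy's sniff, filtered to ARP traffic as an example (an assumption; mac_id_list and pkt_text are expected to be defined in the same module, and sniffing usually requires elevated privileges):

from scapy.all import sniff  # scapy must be installed

# Feed captured ARP packets to the callback; already-seen source MACs are skipped.
sniff(filter="arp", prn=discovery_print, store=0, timeout=30)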
def deallocate_ip(self, hostipaddress):
"""
    Object method that takes a host IP address as input and removes it from the parent IP scope.
    :param hostipaddress: str of the IP address of the target host IP record
:return:
"""
    delete_host_from_segment(hostipaddress, self.netaddr, self.auth, self.url) | Object method that takes a host IP address as input and removes it from the parent IP scope.
:param hostipaddress: str of the IP address of the target host IP record
:return: |