code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k)
---|---|
def unicode_to_string(self):
"""Convert unicode in string
"""
for tag in self.tags:
self.ununicode.append(str(tag)) | Convert unicode tags to strings |
def syscall_direct(*events):
'''
Directly process these events. This should never be used for normal events.
'''
def _syscall(scheduler, processor):
for e in events:
processor(e)
return _syscall | Directly process these events. This should never be used for normal events. |
def schedule_downtime(scope,
api_key=None,
app_key=None,
monitor_id=None,
start=None,
end=None,
message=None,
recurrence=None,
timezone=None,
test=False):
'''
Schedule downtime for a scope of monitors.
CLI Example:
.. code-block:: bash
salt-call datadog.schedule_downtime 'host:app2' \\
stop=$(date --date='30 minutes' +%s) \\
app_key='0123456789' \\
api_key='9876543210'
Optional arguments
:param monitor_id: The ID of the monitor
:param start: Start time in seconds since the epoch
:param end: End time in seconds since the epoch
:param message: A message to send in a notification for this downtime
:param recurrence: Repeat this downtime periodically
:param timezone: Specify the timezone
'''
ret = {'result': False,
'response': None,
'comment': ''}
if api_key is None:
raise SaltInvocationError('api_key must be specified')
if app_key is None:
raise SaltInvocationError('app_key must be specified')
if test is True:
ret['result'] = True
ret['comment'] = 'A schedule downtime API call would have been made.'
return ret
_initialize_connection(api_key, app_key)
# Schedule downtime
try:
response = datadog.api.Downtime.create(scope=scope,
monitor_id=monitor_id,
start=start,
end=end,
message=message,
recurrence=recurrence,
timezone=timezone)
except ValueError:
comment = ('Unexpected exception in Datadog Schedule Downtime API '
'call. Are your keys correct?')
ret['comment'] = comment
return ret
ret['response'] = response
if 'active' in response.keys():
ret['result'] = True
ret['comment'] = 'Successfully scheduled downtime'
return ret | Schedule downtime for a scope of monitors.
CLI Example:
.. code-block:: bash
salt-call datadog.schedule_downtime 'host:app2' \\
stop=$(date --date='30 minutes' +%s) \\
app_key='0123456789' \\
api_key='9876543210'
Optional arguments
:param monitor_id: The ID of the monitor
:param start: Start time in seconds since the epoch
:param end: End time in seconds since the epoch
:param message: A message to send in a notification for this downtime
:param recurrence: Repeat this downtime periodically
:param timezone: Specify the timezone |
def is_complete(self):
"""Returns True if this is a complete solution, i.e, all nodes are allocated
Returns
-------
bool
True if all nodes are llocated.
"""
return all(
[node.route_allocation() is not None for node in list(self._nodes.values()) if node != self._problem.depot()]
) | Returns True if this is a complete solution, i.e., all nodes are allocated
Returns
-------
bool
True if all nodes are allocated. |
def getVisibility(self):
'''
Gets the View visibility
'''
try:
if self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
return VISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'INVISIBLE':
return INVISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'GONE':
return GONE
else:
return -2
except:
return -1 | Gets the View visibility |
def download(self,
url,
dest_path=None):
"""
:param url:
:type url: str
:param dest_path:
:type dest_path: str
"""
if os.path.exists(dest_path):
os.remove(dest_path)
resp = get(url, stream=True)
size = int(resp.headers.get("content-length"))
label = "Downloading {filename} ({size:.2f}MB)".format(
filename=os.path.basename(dest_path),
size=size / float(self.chunk_size) / self.chunk_size
)
with open_file(dest_path, 'wb') as file:
content_iter = resp.iter_content(chunk_size=self.chunk_size)
with progressbar(content_iter,
length=size / self.chunk_size,
label=label) as bar:
for chunk in bar:
if chunk:
file.write(chunk)
file.flush() | :param url:
:type url: str
:param dest_path:
:type dest_path: str |
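For context, here is a minimal, self-contained sketch of the same chunked-download pattern using the requests library directly. The URL and destination path are placeholders, and this illustrates the streaming approach rather than this class's exact behavior (which also renders a progress bar).

# Minimal sketch (assumptions: requests is installed; url and dest_path
# are placeholders). Streams the response in chunks instead of loading
# it all into memory, mirroring the iter_content loop above.
import requests

def stream_download(url, dest_path, chunk_size=1024):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    total = int(resp.headers.get("content-length", 0))
    written = 0
    with open(dest_path, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            if chunk:  # skip keep-alive chunks
                fh.write(chunk)
                written += len(chunk)
    return written, total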
def pow2_quantized_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True):
"""Pow2 Quantized Affine.
Pow2 Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.pow2_quantize(w, quantize=quantize_w,
sign=sign_w, with_zero=with_zero_w,
n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
real_b_q = F.pow2_quantize(b, quantize=quantize_b,
sign=sign_b, with_zero=with_zero_b,
n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | Pow2 Quantized Affine.
Pow2 Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) |
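To make the docstring's formula concrete, here is a rough numpy illustration of snapping weights to signed powers of two. It is only a sketch of the idea: nnabla's F.pow2_quantize additionally enforces the bit width (n), the 2^m bound, and optional zero handling, none of which are modeled exactly here.

# Rough numpy illustration of power-of-2 quantization: each weight is
# snapped to the nearest signed power of two. This is a simplified
# sketch, not nnabla's exact pow2_quantize semantics.
import numpy as np

def pow2_quantize_sketch(w, m=2, eps=1e-12):
    sign = np.sign(w)
    exponent = np.round(np.log2(np.abs(w) + eps))
    exponent = np.minimum(exponent, m)  # cap values at 2**m, loosely mirroring m_w
    return sign * 2.0 ** exponent

w = np.array([0.3, -1.7, 0.06, 2.9])
print(pow2_quantize_sketch(w))  # approximately [0.25, -2.0, 0.0625, 4.0]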
def stage(self):
"""Stage python packages for release, verifying everything we can about them."""
if 'PYPI_USER' not in os.environ or 'PYPI_PASS' not in os.environ:
raise BuildError("You must set the PYPI_USER and PYPI_PASS environment variables")
try:
import twine
except ImportError:
raise BuildError("You must install twine in order to release python packages",
suggestion="pip install twine")
if not self.component.has_wheel:
raise BuildError("You can't release a component to a PYPI repository if it doesn't have python packages")
# Make sure we have built distributions ready to upload
wheel = self.component.support_wheel
sdist = "%s-%s.tar.gz" % (self.component.support_distribution, self.component.parsed_version.pep440_string())
wheel_path = os.path.realpath(os.path.abspath(os.path.join(self.component.output_folder, 'python', wheel)))
sdist_path = os.path.realpath(os.path.abspath(os.path.join(self.component.output_folder, 'python', sdist)))
if not os.path.isfile(wheel_path) or not os.path.isfile(sdist_path):
raise BuildError("Could not find built wheel or sdist matching current built version",
sdist_path=sdist_path, wheel_path=wheel_path)
self.dists = [sdist_path, wheel_path] | Stage python packages for release, verifying everything we can about them. |
def get_build_configuration(id=None, name=None):
"""
Retrieve a specific BuildConfiguration
"""
data = get_build_configuration_raw(id, name)
if data:
return utils.format_json(data) | Retrieve a specific BuildConfiguration |
def _set_serial_console(self):
"""
Configures the first serial port to allow a serial console connection.
"""
# activate the first serial port
yield from self._modify_vm("--uart1 0x3F8 4")
# set server mode with a pipe on the first serial port
pipe_name = self._get_pipe_name()
args = [self._vmname, "--uartmode1", "server", pipe_name]
yield from self.manager.execute("modifyvm", args) | Configures the first serial port to allow a serial console connection. |
def process_file(source_file):
"""
Extract text from a file (pdf, txt, eml, csv, json)
:param source_file: path to the file to read
:return: text extracted from the file
"""
if source_file.endswith(('.pdf', '.PDF')):
txt = extract_pdf(source_file)
elif source_file.endswith(('.txt', '.eml', '.csv', '.json')):
with open(source_file, 'r') as f:
txt = f.read()
else:
logger.info("Unsupported file extension for file {}".format(source_file))
return ""
return txt | Extract text from a file (pdf, txt, eml, csv, json)
:param source_file: path to the file to read
:return: text extracted from the file |
def rollsd(self, scale=1, **kwargs):
'''A :ref:`rolling function <rolling-function>` for
standard-deviation values:
Same as::
self.rollapply('sd', **kwargs)
'''
ts = self.rollapply('sd', **kwargs)
if scale != 1:
ts *= scale
return ts | A :ref:`rolling function <rolling-function>` for
standard-deviation values:
Same as::
self.rollapply('sd', **kwargs) |
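For readers more familiar with pandas, the pattern is analogous to a rolling standard deviation multiplied by a constant. The sqrt(252) annualization factor below is an illustrative assumption about one typical use of `scale`, not something rollsd prescribes, and the pandas API here is independent of this project's rollapply.

# Hedged pandas analogue: rolling std scaled by a constant factor.
# The sqrt(252) annualization is an assumed example use of `scale`.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.Series(rng.normal(0, 0.01, 500))
rolling_sd = returns.rolling(window=20).std()
annualized = rolling_sd * np.sqrt(252)
print(annualized.tail())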
def _fix_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code != 258 or self.count != 2:
return
# TODO: test this case; need example file
log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code)
value = struct.pack('<HH', *self.value)
self.valueoffset = struct.unpack('<I', value)[0]
parent.filehandle.seek(self.valueoffset)
self.value = struct.unpack('<HH', parent.filehandle.read(4)) | Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag. |
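The fix above re-interprets the two 16-bit values stored in the tag's value field as a single little-endian 32-bit file offset before re-reading the real values. The standalone snippet below demonstrates just that struct round-trip, with the file I/O omitted.

# Standalone illustration of the struct round-trip used above: two
# little-endian uint16 values are packed and re-read as one uint32.
import struct

two_shorts = (0x1234, 0x0001)
raw = struct.pack('<HH', *two_shorts)   # b'\x34\x12\x01\x00'
offset = struct.unpack('<I', raw)[0]
print(hex(offset))                      # 0x11234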
def run( self, for_time=None ):
"""
Run the simulation.
Args:
for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None.
Returns:
None
"""
self.for_time = for_time
try:
self.is_initialised()
except AttributeError:
raise
if self.number_of_equilibration_jumps > 0:
for step in range( self.number_of_equilibration_jumps ):
self.lattice.jump()
self.reset()
if self.for_time:
self.number_of_jumps = 0
while self.lattice.time < self.for_time:
self.lattice.jump()
self.number_of_jumps += 1
else:
for step in range( self.number_of_jumps ):
self.lattice.jump()
self.has_run = True | Run the simulation.
Args:
for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None.
Returns:
None |
def filter(self, dict_name, priority_min='-inf', priority_max='+inf',
start=0, limit=None):
'''Get a subset of a dictionary.
This retrieves only keys with priority scores greater than or
equal to `priority_min` and less than or equal to `priority_max`.
Of those keys, it skips the first `start` ones, and then returns
at most `limit` keys.
With default parameters, this retrieves the entire dictionary,
making it a more expensive version of :meth:`pull`. This can
be used to limit the dictionary by priority score, for instance
using the score as a time stamp and only retrieving values
before or after a specific time; or it can be used to get
slices of the dictionary if there are too many items to use
:meth:`pull`.
This is a read-only operation and does not require a session
lock, but if this is run in a session context, the lock will
be honored.
:param str dict_name: name of the dictionary to retrieve
:param float priority_min: lowest score to retrieve
:param float priority_max: highest score to retrieve
:param int start: number of items to skip
:param int limit: number of items to retrieve
:return: corresponding (partial) Python dictionary
:raise rejester.LockError: if the session lock timed out
'''
conn = redis.Redis(connection_pool=self.pool)
script = conn.register_script('''
if (ARGV[1] == "") or (redis.call("get", KEYS[1]) == ARGV[1])
then
-- find all the keys and priorities within range
local next_keys = redis.call("zrangebyscore", KEYS[3],
ARGV[2], ARGV[3],
"limit", ARGV[4], ARGV[5])
if not next_keys[1] then
return {}
end
local t = {}
for i = 1, #next_keys do
local next_val = redis.call("hget", KEYS[2], next_keys[i])
table.insert(t, next_keys[i])
table.insert(t, next_val)
end
return t
else
-- ERROR: No longer own the lock
return -1
end
''')
if limit is None: limit = -1
res = script(keys=[self._lock_name,
self._namespace(dict_name),
self._namespace(dict_name) + 'keys'],
args=[self._session_lock_identifier or '',
priority_min, priority_max, start, limit])
if res == -1:
raise LockError()
split_res = dict([(self._decode(res[i]),
self._decode(res[i+1]))
for i in xrange(0, len(res)-1, 2)])
return split_res | Get a subset of a dictionary.
This retrieves only keys with priority scores greater than or
equal to `priority_min` and less than or equal to `priority_max`.
Of those keys, it skips the first `start` ones, and then returns
at most `limit` keys.
With default parameters, this retrieves the entire dictionary,
making it a more expensive version of :meth:`pull`. This can
be used to limit the dictionary by priority score, for instance
using the score as a time stamp and only retrieving values
before or after a specific time; or it can be used to get
slices of the dictionary if there are too many items to use
:meth:`pull`.
This is a read-only operation and does not require a session
lock, but if this is run in a session context, the lock will
be honored.
:param str dict_name: name of the dictionary to retrieve
:param float priority_min: lowest score to retrieve
:param float priority_max: highest score to retrieve
:param int start: number of items to skip
:param int limit: number of items to retrieve
:return: corresponding (partial) Python dictionary
:raise rejester.LockError: if the session lock timed out |
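As a plain-Python reference for the semantics of the Lua script above (priority bounds, then skip/limit), here is a small sketch over ordinary dicts. Redis, the atomic script, and the session lock are deliberately left out.

# Pure-Python sketch of the filter semantics: keys are ordered by
# priority, restricted to [priority_min, priority_max], then sliced
# with start/limit.
def filter_dict(values, priorities, priority_min=float('-inf'),
                priority_max=float('inf'), start=0, limit=None):
    keys = sorted((k for k in priorities
                   if priority_min <= priorities[k] <= priority_max),
                  key=lambda k: priorities[k])
    keys = keys[start:] if limit is None else keys[start:start + limit]
    return {k: values[k] for k in keys}

values = {'a': 1, 'b': 2, 'c': 3}
priorities = {'a': 10.0, 'b': 20.0, 'c': 30.0}
print(filter_dict(values, priorities, priority_min=15, limit=1))  # {'b': 2}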
def denoise_grid(self, val, expand=1):
"""
For every cell in the grid equal to 'val', fill the cells
around it to de-noise the grid
"""
updated_grid = [[self.grd.get_tile(y,x) \
for x in range(self.grd.grid_width)] \
for y in range(self.grd.grid_height)]
for row in range(self.grd.get_grid_height() - expand):
for col in range(self.grd.get_grid_width() - expand):
updated_grid[row][col] = self.grd.get_tile(row,col) # set original point
if self.grd.get_tile(row,col) == val:
for y in range(-expand, expand):
for x in range(-expand, expand):
new_x = col+x
new_y = row+y
if new_x < 0: new_x = 0
if new_y < 0: new_y = 0
if new_x > self.grd.get_grid_width() - 1: new_x = self.grd.get_grid_width() - 1
if new_y > self.grd.get_grid_height() - 1: new_y = self.grd.get_grid_height() - 1
# randomly NOT denoise to make interesting edges
if expand > 0:
if randint(1,expand * 2) > (expand+1):
updated_grid[new_y][new_x] = val
else:
updated_grid[new_y][new_x] = val
self.grd.replace_grid(updated_grid) | For every cell in the grid equal to 'val', fill the cells
around it to de-noise the grid |
def _make_shred(self, c, name, feature_extractors, sheet_name):
"""Creates a Shred instances from a given contour.
Args:
c: cv2 contour object.
name: string shred name within a sheet.
feature_extractors: iterable of AbstractShredFeature instances.
Returns:
A new Shred instance or None on failure.
"""
height, width, channels = self.orig_img.shape
# bounding rect of current contour
r_x, r_y, r_w, r_h = cv2.boundingRect(c)
# Generating simplified contour to use it in html
epsilon = 0.01 * cv2.arcLength(c, True)
simplified_contour = cv2.approxPolyDP(c, epsilon, True)
# filter out too small fragments
if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3:
print("Skipping piece #%s as too small (%spx x %s px)" % (
name, r_w, r_h))
return None
if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100:
print("Skipping piece #%s as too big (%spx x %s px)" % (
name, r_w, r_h))
return None
# position of rect of min area.
# this will provide us angle to straighten image
box_center, bbox, angle = cv2.minAreaRect(c)
# We want our pieces to be "vertical"
if bbox[0] > bbox[1]:
angle += 90
bbox = (bbox[1], bbox[0])
if bbox[1] / float(bbox[0]) > 70:
print("Skipping piece #%s as too too long and narrow" % name)
return None
# Coords of region of interest using which we should crop piece after
# rotation
y1 = math.floor(box_center[1] - bbox[1] / 2)
x1 = math.floor(box_center[0] - bbox[0] / 2)
bbox = tuple(map(int, map(math.ceil, bbox)))
# A mask we use to show only piece we are currently working on
piece_mask = np.zeros([height, width, 1], dtype=np.uint8)
cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED)
# apply mask to original image
img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w]
piece_in_context = self.save_image(
"pieces/%s_ctx" % name,
self.orig_img[max(r_y - 10, 0):r_y + r_h + 10,
max(r_x - 10, 0):r_x + r_w + 10])
mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w]
img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask)
# Add alpha layer and set it to the mask
img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA)
img_roi[:, :, 3] = mask[:, :, 0]
# Straighten it
# Because we crop original image before rotation we save us some memory
# and a lot of time but we need to adjust coords of the center of
# new min area rect
M = cv2.getRotationMatrix2D((box_center[0] - r_x,
box_center[1] - r_y), angle, 1)
# And translate an image a bit to make it fit to the bbox again.
# This is done with direct editing of the transform matrix.
# (Wooohoo, I know matrix-fu)
M[0][2] += r_x - x1
M[1][2] += r_y - y1
# Apply rotation/transform/crop
img_roi = cv2.warpAffine(img_roi, M, bbox)
piece_fname = self.save_image("pieces/%s" % name, img_roi, "png")
# FEATURES MAGIC BELOW
#
# Get our mask/contour back after the transform
_, _, _, mask = cv2.split(img_roi)
_, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
if len(contours) != 1:
print("Piece #%s has strange contours after transform" % name)
cnt = contours[0]
features_fname = self.save_image("pieces/%s_mask" % name, mask, "png")
base_features = {
# On_sheet_* features describe the min bounding box on the sheet.
"on_sheet_x": r_x,
"on_sheet_y": r_y,
"on_sheet_width": r_w,
"on_sheet_height": r_h,
"on_sheet_angle": angle,
"width": img_roi.shape[1],
"height": img_roi.shape[0],
}
tags_suggestions = []
for feat in feature_extractors:
fts, tags = feat.get_info(img_roi, cnt, name)
base_features.update(fts)
tags_suggestions += tags
if tags_suggestions:
print(name, tags_suggestions)
return Shred(
contour=c,
features=base_features,
features_fname=features_fname,
img_roi=img_roi,
name=name,
piece_fname=piece_fname,
piece_in_context_fname=piece_in_context,
sheet=sheet_name,
simplified_contour=simplified_contour,
tags_suggestions=tags_suggestions,
) | Creates a Shred instances from a given contour.
Args:
c: cv2 contour object.
name: string shred name within a sheet.
feature_extractors: iterable of AbstractShredFeature instances.
Returns:
A new Shred instance or None on failure. |
def not_has_branch(branch):
"""Raises `ExistingBranchError` if the specified branch exists."""
if _has_branch(branch):
msg = 'Cannot proceed while {} branch exists; remove and try again.'.format(branch)
raise temple.exceptions.ExistingBranchError(msg) | Raises `ExistingBranchError` if the specified branch exists. |
def before_app_websocket(self, func: Callable) -> Callable:
"""Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
...
"""
self.record_once(lambda state: state.app.before_websocket(func))
return func | Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
... |
def channels_open(self, room_id, **kwargs):
"""Adds the channel back to the user’s list of channels."""
return self.__call_api_post('channels.open', roomId=room_id, kwargs=kwargs) | Adds the channel back to the user’s list of channels. |
def run(self):
"""主函数"""
# try:
self.fenum.write('\n')
self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), 'ThostFtdcUserApiDataType.h'), 'r')
for idx, line in enumerate(self.fcpp):
l = self.process_line(idx, line)
self.f_data_type.write(l)
self.fcpp.close()
self.f_data_type.close()
self.fenum.close()
print('ctp_data_type.py generation complete') | Main function |
def init_model(engine, create=True, drop=False):
"""
Initializes the shared SQLAlchemy state in the L{coilmq.store.sa.model} module.
@param engine: The SQLAlchemy engine instance.
@type engine: C{sqlalchemy.Engine}
@param create: Whether to create the tables (if they do not exist).
@type create: C{bool}
@param drop: Whether to drop the tables (if they exist).
@type drop: C{bool}
"""
meta.engine = engine
meta.metadata = MetaData(bind=meta.engine)
meta.Session = scoped_session(sessionmaker(bind=meta.engine))
model.setup_tables(create=create, drop=drop) | Initializes the shared SQLAlchemy state in the L{coilmq.store.sa.model} module.
@param engine: The SQLAlchemy engine instance.
@type engine: C{sqlalchemy.Engine}
@param create: Whether to create the tables (if they do not exist).
@type create: C{bool}
@param drop: Whether to drop the tables (if they exist).
@type drop: C{bool} |
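The function's job is to bind an engine to module-level metadata and a thread-local session factory. The standalone sketch below shows that SQLAlchemy pattern with an in-memory SQLite engine, without coilmq's meta and model modules; note that MetaData(bind=...) as used above is SQLAlchemy 1.x style.

# Standalone sketch of the engine / scoped-session wiring, without
# coilmq's meta and model modules. Uses an in-memory SQLite database.
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine('sqlite://')
metadata = MetaData()          # MetaData(bind=engine) in SQLAlchemy 1.x
Session = scoped_session(sessionmaker(bind=engine))

session = Session()            # thread-local session from the factory
session.close()
Session.remove()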
def __event_exist(self, event_type):
"""Return the event position, if it exists.
An event exists if:
* end is < 0
* event_type is matching
Return -1 if the item is not found.
"""
for i in range(self.len()):
if self.events_list[i][1] < 0 and self.events_list[i][3] == event_type:
return i
return -1 | Return the event position, if it exists.
An event exists if:
* end is < 0
* event_type is matching
Return -1 if the item is not found. |
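The only structural assumption in the loop above is the tuple layout of events_list: index 1 holds the end marker (negative means still open) and index 3 holds the event type. A small standalone version over a sample list makes that explicit.

# Standalone version of the same lookup over a sample list. The only
# assumption carried over is the index layout: position 1 is the end
# marker (< 0 means still open), position 3 is the event type.
def find_open_event(events_list, event_type):
    for i, event in enumerate(events_list):
        if event[1] < 0 and event[3] == event_type:
            return i
    return -1

events = [
    (100, 200, 'warn', 'CPU'),     # closed event
    (150, -1, 'critical', 'MEM'),  # still open
]
print(find_open_event(events, 'MEM'))  # 1
print(find_open_event(events, 'CPU'))  # -1 (already ended)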
def _maybe_throw(self):
"""
Throw any deferred exceptions set via :meth:`_add_err`
"""
if self._err:
ex_cls, ex_obj, ex_bt = self._err
self._err = None
PyCBC.raise_helper(ex_cls, ex_obj, ex_bt) | Throw any deferred exceptions set via :meth:`_add_err` |
def GET(self, func, data):
"""Send GET request to execute Ndrive API
:param func: The function name you want to execute in Ndrive API.
:param data: Parameter data for the HTTP request.
:returns: metadata on success, or False on failure
"""
if func not in ['getRegisterUserInfo']:
s, message = self.checkAccount()
if s is False:
return False, message
url = nurls[func]
r = self.session.get(url, params = data)
r.encoding = 'utf-8'
if self.debug:
print r.text
try:
try:
metadata = json.loads(r.text)
except:
metadata = json.loads(r.text[r.text.find('{'):-1])
message = metadata['message']
if message == 'success':
return True, metadata['resultvalue']
else:
return False, message
except:
for e in sys.exc_info():
print e
sys.exit(1)
return False, "Error %s: Failed to send GET request" %func | Send GET request to execute Ndrive API
:param func: The function name you want to execute in Ndrive API.
:param data: Parameter data for the HTTP request.
:returns: metadata on success, or False on failure |
def add_plot_parser(subparsers):
"""Add function 'plot' argument parsers."""
argparser_replot = subparsers.add_parser("replot", help="Reproduce GSEA desktop output figures.")
group_replot = argparser_replot.add_argument_group("Input arguments")
group_replot.add_argument("-i", "--indir", action="store", dest="indir", required=True, metavar='GSEA_dir',
help="The GSEA desktop results directroy that you want to reproduce the figure ")
add_output_option(group_replot)
#add_output_group( argparser_plot )
group_replot.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
help='Weighted score of rank_metrics. Please use the same value as in GSEA. Choose from (0, 1, 1.5, 2), default: 1',)
return | Add function 'plot' argument parsers. |
def get_opcodes_from_bp_table(bp):
"""Given a 2d list structure, collect the opcodes from the best path."""
x = len(bp) - 1
y = len(bp[0]) - 1
opcodes = []
while x != 0 or y != 0:
this_bp = bp[x][y]
opcodes.append(this_bp)
if this_bp[0] == EQUAL or this_bp[0] == REPLACE:
x = x - 1
y = y - 1
elif this_bp[0] == INSERT:
y = y - 1
elif this_bp[0] == DELETE:
x = x - 1
opcodes.reverse()
return opcodes | Given a 2d list structure, collect the opcodes from the best path. |
def add_constraint(self, name, coefficients={}, ub=0):
"""
Add a constraint to the problem. The constraint is formulated as a dictionary of variable names to
linear coefficients.
The constraint can only have an upper bound. To make a constraint with a lower bound, multiply
all coefficients by -1.
"""
if name in self._constraints:
raise ValueError(
"A constraint named " + name + " already exists."
)
self._constraints[name] = len(self._constraints)
self.upper_bounds = np.append(self.upper_bounds, ub)
new_row = np.array([[coefficients.get(name, 0) for name in self._variables]])
self._add_row_to_A(new_row)
self._reset_solution() | Add a constraint to the problem. The constraint is formulated as a dictionary of variable names to
linear coefficients.
The constraint can only have an upper bound. To make a constraint with a lower bound, multiply
all coefficients by -1. |
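The docstring's note about lower bounds is just sign flipping. The short check below verifies the equivalence "x + y >= 3 is the same as -x - y <= -3" numerically, independent of this class.

# Quick numeric check of the lower-bound trick from the docstring:
# an ">=" constraint becomes a "<=" constraint by negating every
# coefficient and the bound.
coefficients = {'x': 1.0, 'y': 1.0}
lower_bound = 3.0

flipped = {name: -c for name, c in coefficients.items()}
ub = -lower_bound

point = {'x': 2.0, 'y': 2.0}  # satisfies x + y >= 3
lhs = sum(flipped[n] * point[n] for n in flipped)
print(lhs <= ub)  # True: -4.0 <= -3.0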
def register(self, config_file, contexts, config_template=None):
"""
Register a config file with a list of context generators to be called
during rendering.
config_template can be used to load a template from a string instead of
using template loaders and template files.
:param config_file (str): a path where a config file will be rendered
:param contexts (list): a list of context dictionaries with kv pairs
:param config_template (str): an optional template string to use
"""
self.templates[config_file] = OSConfigTemplate(
config_file=config_file,
contexts=contexts,
config_template=config_template
)
log('Registered config file: {}'.format(config_file),
level=INFO) | Register a config file with a list of context generators to be called
during rendering.
config_template can be used to load a template from a string instead of
using template loaders and template files.
:param config_file (str): a path where a config file will be rendered
:param contexts (list): a list of context dictionaries with kv pairs
:param config_template (str): an optional template string to use |
def pmdec(self,*args,**kwargs):
"""
NAME:
pmdec
PURPOSE:
return proper motion in declination (in mas/yr)
INPUT:
t - (optional) time at which to get pmdec (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
pm_dec(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
out= self._orb.pmdec(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | NAME:
pmdec
PURPOSE:
return proper motion in declination (in mas/yr)
INPUT:
t - (optional) time at which to get pmdec (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantities)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
pm_dec(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU) |
def register(self, name):
"""Decorator for registering a function with PyPhi.
Args:
name (string): The name of the function
"""
def register_func(func):
self.store[name] = func
return func
return register_func | Decorator for registering a function with PyPhi.
Args:
name (string): The name of the function |
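A self-contained miniature of the registry pattern above, independent of PyPhi, showing how the decorator files a function away under a name and hands it back unchanged.

# Miniature registry: register(name) returns a decorator that stores
# the function in self.store[name] and returns it unmodified.
class Registry:
    def __init__(self):
        self.store = {}

    def register(self, name):
        def register_func(func):
            self.store[name] = func
            return func
        return register_func

measures = Registry()

@measures.register('identity')
def identity(x):
    return x

print(measures.store['identity'](42))  # 42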
def get_auth_basic(self):
"""return the username and password of a basic auth header if it exists"""
username = ''
password = ''
auth_header = self.get_header('authorization')
if auth_header:
m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I)
if m:
auth_str = Base64.decode(m.group(1))
username, password = auth_str.split(':', 1)
return username, password | return the username and password of a basic auth header if it exists |
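The header value after "Basic " is base64 of "username:password". The standalone sketch below does the same parse with the standard library's base64 module; the Base64.decode helper used above is project-specific.

# Standalone sketch of parsing a Basic auth header with the standard
# library instead of the project's Base64 helper.
import base64
import re

def parse_basic_auth(auth_header):
    m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I)
    if not m:
        return '', ''
    decoded = base64.b64decode(m.group(1)).decode('utf-8')
    username, password = decoded.split(':', 1)
    return username, password

header = 'Basic ' + base64.b64encode(b'alice:s3cret').decode('ascii')
print(parse_basic_auth(header))  # ('alice', 's3cret')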
def set(self, tclass, tnum, tlvt=0, tdata=b''):
"""set the values of the tag."""
if isinstance(tdata, bytearray):
tdata = bytes(tdata)
elif not isinstance(tdata, bytes):
raise TypeError("tag data must be bytes or bytearray")
self.tagClass = tclass
self.tagNumber = tnum
self.tagLVT = tlvt
self.tagData = tdata | set the values of the tag. |
def show_linkinfo_output_show_link_info_linkinfo_domain_reachable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_domain_reachable = ET.SubElement(show_link_info, "linkinfo-domain-reachable")
linkinfo_domain_reachable.text = kwargs.pop('linkinfo_domain_reachable')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def add_exit(self, guard, dst, jk, ip):
"""
Add an exit out of the middle of an IRSB.
(e.g., a conditional jump)
:param guard: An expression, the exit is taken if true
:param dst: the destination of the exit (a Const)
:param jk: the JumpKind of this exit (probably Ijk_Boring)
:param ip: The address of this exit's source
"""
self.irsb.statements.append(Exit(guard, dst.con, jk, ip)) | Add an exit out of the middle of an IRSB.
(e.g., a conditional jump)
:param guard: An expression, the exit is taken if true
:param dst: the destination of the exit (a Const)
:param jk: the JumpKind of this exit (probably Ijk_Boring)
:param ip: The address of this exit's source |
def delay(self, func, args=None, kwargs=None, queue=None,
hard_timeout=None, unique=None, lock=None, lock_key=None,
when=None, retry=None, retry_on=None, retry_method=None,
max_queue_size=None):
"""
Queues a task. See README.rst for an explanation of the options.
"""
task = Task(self, func, args=args, kwargs=kwargs, queue=queue,
hard_timeout=hard_timeout, unique=unique,
lock=lock, lock_key=lock_key,
retry=retry, retry_on=retry_on, retry_method=retry_method)
task.delay(when=when, max_queue_size=max_queue_size)
return task | Queues a task. See README.rst for an explanation of the options. |
def has_code(state, text, pattern=True, not_typed_msg=None):
"""Test the student code.
Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``,
as it is more robust to small syntactical differences that don't change the code's behavior.
Args:
text (str): the text that is searched for
pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text.
not_typed_msg (str): feedback message to be displayed if the student did not type the text.
:Example:
Student code and solution code::
y = 1 + 2 + 3
SCT::
# Verify that student code contains pattern (not robust!!):
Ex().has_code(r"1\\s*\\+2\\s*\\+3")
"""
if not not_typed_msg:
if pattern:
not_typed_msg = "Could not find the correct pattern in your code."
else:
not_typed_msg = "Could not find the following text in your code: %r" % text
student_code = state.student_code
_msg = state.build_message(not_typed_msg)
state.do_test(
StringContainsTest(student_code, text, pattern, Feedback(_msg, state))
)
return state | Test the student code.
Tests if the student typed a (pattern of) text. It is advised to use ``has_equal_ast()`` instead of ``has_code()``,
as it is more robust to small syntactical differences that don't change the code's behavior.
Args:
text (str): the text that is searched for
pattern (bool): if True (the default), the text is treated as a pattern. If False, it is treated as plain text.
not_typed_msg (str): feedback message to be displayed if the student did not type the text.
:Example:
Student code and solution code::
y = 1 + 2 + 3
SCT::
# Verify that student code contains pattern (not robust!!):
Ex().has_code(r"1\\s*\\+2\\s*\\+3") |
def __embed_frond(node_u, node_w, dfs_data, as_branch_marker=False):
"""Embeds a frond uw into either LF or RF. Returns whether the embedding was successful."""
d_u = D(node_u, dfs_data)
d_w = D(node_w, dfs_data)
comp_d_w = abs(d_w)
if as_branch_marker:
d_w *= -1
if dfs_data['last_inserted_side'] == 'LF':
__insert_frond_RF(d_w, d_u, dfs_data)
else:
# We default to inserting a branch marker on the left side, unless we know otherwise
__insert_frond_LF(d_w, d_u, dfs_data)
return True
LF = dfs_data['LF']
m = dfs_data['FG']['m']
l_w = lw(dfs_data)
r_w = rw(dfs_data)
u_m = u(m, dfs_data)
x_m = fn_x(m, dfs_data)
# There are multiple cases for both u and w
# --Detect the case for u and store it for handling once the case for w is determined
case_1 = False
case_2 = False
case_3 = False
if d_u > u_m and d_u > x_m:
case_1 = True
elif d_u <= u_m and d_u > x_m:
case_2 = True
elif d_u > u_m and d_u <= x_m:
case_3 = True
else:
# We should never get here, return false because there's no way we can embed this frond
#print "Invalid u-case detected: (d_u, u_m, x_m): ({}, {}, {})".format(d_u, u_m, x_m)
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
return False
# --Detect the case for w and process the edge appropriately
if comp_d_w >= l_w and comp_d_w >= r_w:
# Case 4
#print "w-case 4 reached"
# --We do the same thing for all three u-cases: Add the frond to the left side
__insert_frond_LF(d_w, d_u, dfs_data)
dfs_data['FG']['m'] += 1
m = dfs_data['FG']['m']
n = dfs_data['graph'].num_nodes()
Lm = {'u': d_w, 'v': d_u}
Rm = {'x': n, 'y': 0} # See page 17 for how we deal with Ri being empty
#Rm = {}
dfs_data['FG'][m] = [Lm, Rm]
return True
elif comp_d_w >= l_w and comp_d_w < r_w:
# Case 5
#print "w-case 5 reached"
return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
elif comp_d_w < l_w and comp_d_w >= r_w:
# Case 6
#print "w-case 6 reached"
return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
elif comp_d_w < l_w and comp_d_w < r_w:
# Case 7
#print "w-case 7 reached"
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
#print "(d_w, l_w, r_w): ({}, {}, {})".format(d_w, l_w, r_w)
#print "(d_u, u_m, x_m, m): ({}, {}, {}, {})".format(d_u, u_m, x_m, m)
while comp_d_w < l_w and comp_d_w < r_w:
if d_u > u_m and d_u > x_m:
#print "Nonplanar case reached: u-case 1, w-case 7"
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
#print "(d_w, l_w, r_w): ({}, {}, {})".format(d_w, l_w, r_w)
#print "(d_u, u_m, x_m, m): ({}, {}, {}, {})".format(d_u, u_m, x_m, m)
return False
switch_sides(d_u, dfs_data)
# --Update the local variables for the next loop iteration
l_w = lw(dfs_data)
r_w = rw(dfs_data)
m = dfs_data['FG']['m']
u_m = u(m, dfs_data)
x_m = fn_x(m, dfs_data)
case_1 = False
case_2 = False
case_3 = False
if d_u <= u_m and d_u > x_m:
case_2 = True
elif d_u > u_m and d_u <= x_m:
case_3 = True
if comp_d_w >= l_w and comp_d_w < r_w:
# Case 5 redux
#print "w-case 5 redux reached"
return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
if comp_d_w < l_w and comp_d_w >= r_w:
# Case 6 redux
#print "w-case 6 redux reached"
return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
else:
# We should never get here, return false because there's no way we can embed this frond
#print "Invalid w-case detected"
return False
# We really shouldn't get to this point, but this is a catch-all just in case
#print "Failure catchall reached"
return False | Embeds a frond uw into either LF or RF. Returns whether the embedding was successful. |
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, placement=[i])
new_blocks.append(block)
return new_blocks | return an array of blocks that potentially have different dtypes (and
are sparse) |
def _leftMouseDragged(self, stopCoord, strCoord, speed):
"""Private method to handle generic mouse left button dragging and
dropping.
Parameters: stopCoord(x,y) drop point
Optional: strCoord (x, y) drag point, default (0,0) get current
mouse position
speed (int) 1 to unlimit, simulate mouse moving
action from some special requirement
Returns: None
"""
# To direct output to the correct application need the PSN:
appPid = self._getPid()
# Get current position as start point if strCoord not given
if strCoord == (0, 0):
loc = AppKit.NSEvent.mouseLocation()
strCoord = (loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y)
# To direct output to the correct application need the PSN:
appPid = self._getPid()
# Press left button down
pressLeftButton = Quartz.CGEventCreateMouseEvent(
None,
Quartz.kCGEventLeftMouseDown,
strCoord,
Quartz.kCGMouseButtonLeft
)
# Queue the events
Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, pressLeftButton)
# Wait for response of system, a fuzzy icon appears
time.sleep(5)
# Simulate mouse moving speed, k is slope
speed = round(1 / float(speed), 2)
xmoved = stopCoord[0] - strCoord[0]
ymoved = stopCoord[1] - strCoord[1]
if ymoved == 0:
raise ValueError('Horizontal moving is not supported')
if xmoved != 0:
# Compute the slope only for non-vertical drags; xmoved == 0 would
# otherwise raise ZeroDivisionError before the explicit check below.
k = abs(ymoved / xmoved)
for xpos in range(int(abs(xmoved))):
if xmoved > 0 and ymoved > 0:
currcoord = (strCoord[0] + xpos, strCoord[1] + xpos * k)
elif xmoved > 0 and ymoved < 0:
currcoord = (strCoord[0] + xpos, strCoord[1] - xpos * k)
elif xmoved < 0 and ymoved < 0:
currcoord = (strCoord[0] - xpos, strCoord[1] - xpos * k)
elif xmoved < 0 and ymoved > 0:
currcoord = (strCoord[0] - xpos, strCoord[1] + xpos * k)
# Drag with left button
dragLeftButton = Quartz.CGEventCreateMouseEvent(
None,
Quartz.kCGEventLeftMouseDragged,
currcoord,
Quartz.kCGMouseButtonLeft
)
Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap,
dragLeftButton)
# Wait for response of system
time.sleep(speed)
else:
raise ValueError('Vertical moving is not supported')
upLeftButton = Quartz.CGEventCreateMouseEvent(
None,
Quartz.kCGEventLeftMouseUp,
stopCoord,
Quartz.kCGMouseButtonLeft
)
# Wait for response of system, a plus icon appears
# Release the left button
# Up left button up
Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, upLeftButton) | Private method to handle generic mouse left button dragging and
dropping.
Parameters: stopCoord(x,y) drop point
Optional: strCoord (x, y) drag point, default (0,0) get current
mouse position
speed (int) 1 to unlimit, simulate mouse moving
action from some special requirement
Returns: None |
def tagfunc(nargs=None, ndefs=None, nouts=None):
"""
Decorator for tagged functions
"""
def wrapper(f):
return wraps(f)(FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs))
return wrapper | Decorator for tagged functions |
def _parser():
"""Parse command-line options."""
launcher = 'pip%s-utils' % sys.version_info.major
parser = argparse.ArgumentParser(
description='%s.' % __description__,
epilog='See `%s COMMAND --help` for help '
'on a specific subcommand.' % launcher,
prog=launcher)
parser.add_argument(
'--version',
action='version',
version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers()
# dependants
parser_dependants = subparsers.add_parser(
'dependants',
add_help=False,
help='list dependants of package')
parser_dependants.add_argument(
'package',
metavar='PACKAGE',
type=_distribution)
parser_dependants.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_dependants.set_defaults(
func=command_dependants)
# dependents
parser_dependents = subparsers.add_parser(
'dependents',
add_help=False,
help='list dependents of package')
parser_dependents.add_argument(
'package',
metavar='PACKAGE',
type=_distribution)
parser_dependents.add_argument(
'-i', '--info',
action='store_true',
help='show version requirements')
parser_dependents.add_argument(
'-r', '--recursive',
action='store_true',
help='list dependencies recursively')
parser_dependents.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_dependents.set_defaults(
func=command_dependents)
# locate
parser_locate = subparsers.add_parser(
'locate',
add_help=False,
help='identify packages that file belongs to')
parser_locate.add_argument(
'file',
metavar='FILE',
type=argparse.FileType('r'))
parser_locate.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_locate.set_defaults(
func=command_locate)
# outdated
parser_outdated = subparsers.add_parser(
'outdated',
add_help=False,
help='list outdated packages that may be updated')
parser_outdated.add_argument(
'-b', '--brief',
action='store_true',
help='show package name only')
group = parser_outdated.add_mutually_exclusive_group()
group.add_argument(
'-a', '--all',
action='store_true',
help='list all outdated packages')
group.add_argument(
'-p', '--pinned',
action='store_true',
help='list outdated packages unable to be updated')
group.add_argument(
'-U', '--upgrade',
action='store_true',
dest='update',
help='update packages that can be updated'
)
parser_outdated.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_outdated.set_defaults(
func=command_outdated)
# parents
parser_parents = subparsers.add_parser(
'parents',
add_help=False,
help='list packages lacking dependants')
parser_parents.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_parents.set_defaults(
func=command_parents)
return parser | Parse command-line options. |
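Each subparser above registers its handler through set_defaults(func=...), so the caller dispatches with args.func(args). The minimal standalone example below illustrates that argparse pattern with a made-up "hello" command.

# Minimal standalone illustration of the set_defaults(func=...) dispatch
# pattern; the "hello" command and its handler are made up.
import argparse

def command_hello(args):
    print("hello,", args.name)

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers()
hello = subparsers.add_parser('hello')
hello.add_argument('name')
hello.set_defaults(func=command_hello)

args = parser.parse_args(['hello', 'world'])
args.func(args)  # prints "hello, world"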
def execute(self):
"""Generate local DB, pulling metadata and data from RWSConnection"""
logging.info('Requesting view metadata for project %s' % self.project_name)
project_csv_meta = self.rws_connection.send_request(ProjectMetaDataRequest(self.project_name))
# Process it into a set of tables
self.db_adapter.processMetaData(project_csv_meta)
# Get the data for the study
for dataset_name in self.db_adapter.datasets.keys():
logging.info('Requesting data from dataset %s' % dataset_name)
form_name, _type = self.name_type_from_viewname(dataset_name)
form_data = self.rws_connection.send_request(
FormDataRequest(self.project_name, self.environment, _type, form_name))
# Now process the form_data into the db of choice
logging.info('Populating dataset %s' % dataset_name)
self.db_adapter.processFormData(form_data, dataset_name)
logging.info('Process complete') | Generate local DB, pulling metadata and data from RWSConnection |
def edit_account_info(self, short_name=None, author_name=None,
author_url=None):
""" Update information about a Telegraph account.
Pass only the parameters that you want to edit
:param short_name: Account name, helps users with several
accounts remember which they are currently using.
Displayed to the user above the "Edit/Publish"
button on Telegra.ph, other users don't see this name
:param author_name: Default author name used when creating new articles
:param author_url: Default profile link, opened when users click on the
author's name below the title. Can be any link,
not necessarily to a Telegram profile or channels
"""
return self._telegraph.method('editAccountInfo', values={
'short_name': short_name,
'author_name': author_name,
'author_url': author_url
}) | Update information about a Telegraph account.
Pass only the parameters that you want to edit
:param short_name: Account name, helps users with several
accounts remember which they are currently using.
Displayed to the user above the "Edit/Publish"
button on Telegra.ph, other users don't see this name
:param author_name: Default author name used when creating new articles
:param author_url: Default profile link, opened when users click on the
author's name below the title. Can be any link,
not necessarily to a Telegram profile or channels |
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
"""Fetch the necessary mim files using a api key
Args:
api_key(str): A api key necessary to fetch mim data
Returns:
mim_files(dict): A dictionary with the neccesary files
"""
LOG.info("Fetching OMIM files from https://omim.org/")
mim2genes_url = 'https://omim.org/static/omim/data/mim2gene.txt'
mimtitles_url= 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
morbidmap_url = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
genemap2_url = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)
mim_files = {}
mim_urls = {}
if mim2genes is True:
mim_urls['mim2genes'] = mim2genes_url
if mimtitles is True:
mim_urls['mimtitles'] = mimtitles_url
if morbidmap is True:
mim_urls['morbidmap'] = morbidmap_url
if genemap2 is True:
mim_urls['genemap2'] = genemap2_url
for file_name in mim_urls:
url = mim_urls[file_name]
mim_files[file_name] = fetch_resource(url)
return mim_files | Fetch the necessary mim files using an API key
Args:
api_key(str): An API key necessary to fetch mim data
Returns:
mim_files(dict): A dictionary with the necessary files |
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
key_spec=None, grant_tokens=None, region=None, key=None,
keyid=None, profile=None):
'''
Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
data_key = conn.generate_data_key(
key_id,
encryption_context=encryption_context,
number_of_bytes=number_of_bytes,
key_spec=key_spec,
grant_tokens=grant_tokens
)
r['data_key'] = data_key
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r | Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128 |
def _get_axis_bounds(self, dim, bunch):
"""Return the min/max of an axis."""
if dim in self.attributes:
# Attribute: specified lim, or compute the min/max.
vmin, vmax = bunch['lim']
assert vmin is not None
assert vmax is not None
return vmin, vmax
# PC dimensions: use the common scaling.
return (-1. / self.scaling, +1. / self.scaling) | Return the min/max of an axis. |
def load_remote_db(self):
"""
Load remote S3 DB
"""
signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
s3 = boto3.resource(
's3',
config=botocore.client.Config(signature_version=signature_version),
)
if '/tmp/' not in self.settings_dict['NAME']:
try:
etag = ''
if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
m = hashlib.md5()
with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
m.update(f.read())
# In general the ETag is the md5 of the file, in some cases it's not,
# and in that case we will just need to reload the file, I don't see any other way
etag = m.hexdigest()
obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
obj_bytes = obj.get(IfNoneMatch=etag)["Body"] # Will throw E on 304 or 404
with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
f.write(obj_bytes.read())
m = hashlib.md5()
with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
m.update(f.read())
self.db_hash = m.hexdigest()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "304":
logging.debug("ETag matches md5 of local copy, using local copy of DB!")
self.db_hash = etag
else:
logging.debug("Couldn't load remote DB object.")
except Exception as e:
# Weird one
logging.debug(e)
# SQLite DatabaseWrapper will treat our tmp as normal now
# Check because Django likes to call this function a lot more than it should
if '/tmp/' not in self.settings_dict['NAME']:
self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
# Make sure it exists if it doesn't yet
if not os.path.isfile(self.settings_dict['NAME']):
open(self.settings_dict['NAME'], 'a').close()
logging.debug("Loaded remote DB!") | Load remote S3 DB |
def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
search = namesearch(
log=self.log,
names=self.theseIds.keys(),
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in self.theseIds.iteritems():
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList | query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
def list_fonts():
"""List system fonts
Returns
-------
fonts : list of str
List of system fonts.
"""
vals = _list_fonts()
for font in _vispy_fonts:
vals += [font] if font not in vals else []
vals = sorted(vals, key=lambda s: s.lower())
return vals | List system fonts
Returns
-------
fonts : list of str
List of system fonts. |
def circ_corrcl(x, y, tail='two-sided'):
"""Correlation coefficient between one circular and one linear variable
random variables.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second variable (linear)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats)
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variable.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(r, pval)
0.109 0.9708899750629236
"""
from scipy.stats import pearsonr, chi2
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
n = x.size
# Compute correlation coefficent for sin and cos independently
rxs = pearsonr(y, np.sin(x))[0]
rxc = pearsonr(y, np.cos(x))[0]
rcs = pearsonr(np.sin(x), np.cos(x))[0]
# Compute angular-linear correlation (equ. 27.47)
r = np.sqrt((rxc**2 + rxs**2 - 2 * rxc * rxs * rcs) / (1 - rcs**2))
# Compute p-value
pval = chi2.sf(n * r**2, 2)
pval = pval / 2 if tail == 'one-sided' else pval
return np.round(r, 3), pval | Correlation coefficient between one circular and one linear random variable.
Parameters
----------
x : np.array
First circular variable (expressed in radians)
y : np.array
Second variable (linear)
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
r : float
Correlation coefficient
pval : float
Uncorrected p-value
Notes
-----
Python code borrowed from brainpipe (based on the MATLAB toolbox CircStats)
Please note that NaN are automatically removed from datasets.
Examples
--------
Compute the r and p-value between one circular and one linear variable.
>>> from pingouin import circ_corrcl
>>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> y = [1.593, 1.291, -0.248, -2.892, 0.102]
>>> r, pval = circ_corrcl(x, y)
>>> print(r, pval)
0.109 0.9708899750629236 |
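For reference, the angular-linear correlation computed above (the "equ. 27.47" cited in the code) can be written out explicitly; this is a restatement of the code, with r_xs, r_xc and r_cs the Pearson correlations of y with sin(x), y with cos(x), and of sin(x) with cos(x):
r = \sqrt{\dfrac{r_{xc}^{2} + r_{xs}^{2} - 2\, r_{xc}\, r_{xs}\, r_{cs}}{1 - r_{cs}^{2}}}, \qquad p = \Pr\!\left(\chi^{2}_{2} \ge n\, r^{2}\right)
and p is halved when a one-sided test is requested.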
def process(self):
"""Entry point of SelectableSelector"""
if WINDOWS:
select_inputs = []
for i in self.inputs:
if not isinstance(i, SelectableObject):
warning("Unknown ignored object type: %s", type(i))
elif i.__selectable_force_select__:
# Then use select.select
select_inputs.append(i)
elif not self.remain and i.check_recv():
self.results.append(i)
else:
i.wait_return(self._exit_door)
if select_inputs:
# Use default select function
self.results.extend(select(select_inputs, [], [], self.remain)[0]) # noqa: E501
if not self.remain:
return self.results
threading.Thread(target=self._timeout_thread, args=(self.remain,)).start() # noqa: E501
if not self._ended:
self.available_lock.acquire()
return self.results
else:
r, _, _ = select(self.inputs, [], [], self.remain)
return r | Entry point of SelectableSelector |
def create(cls, name, template=None):
"""Creates an LXC"""
command = ['lxc-create', '-n', name]
if template:
command.extend(['-t', template])
subwrap.run(command) | Creates an LXC |
def filter(self, scored_list):
'''
Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered results.
'''
top_n_key = -1 * self.top_n
top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
result_list = sorted(top_n_list, key=lambda x: x[0])
return result_list | Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered results.
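A minimal sketch of the top-n filtering logic above with hypothetical scores (not from the source), using top_n = 2: the two highest-scoring pairs are kept, then re-sorted by their first element:
scored_list = [(0, 0.2), (1, 0.9), (2, 0.5), (3, 0.7)]
top_n = 2
top_n_list = sorted(scored_list, key=lambda x: x[1])[-top_n:]   # [(3, 0.7), (1, 0.9)]
result_list = sorted(top_n_list, key=lambda x: x[0])            # [(1, 0.9), (3, 0.7)]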
def content(self, content):
"""
Sets the content of this SupportLevelPage.
:param content: The content of this SupportLevelPage.
:type: list[str]
"""
allowed_values = ["UNRELEASED", "EARLYACCESS", "SUPPORTED", "EXTENDED_SUPPORT", "EOL"]
if not set(content).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `content` [{0}], must be a subset of [{1}]"
.format(", ".join(map(str, set(content)-set(allowed_values))),
", ".join(map(str, allowed_values)))
)
self._content = content | Sets the content of this SupportLevelPage.
:param content: The content of this SupportLevelPage.
:type: list[str] |
def get_tabs(self, request, **kwargs):
"""Returns the initialized tab group for this view."""
if self._tab_group is None:
self._tab_group = self.tab_group_class(request, **kwargs)
return self._tab_group | Returns the initialized tab group for this view. |
def remove_dashboard_tag(self, id, tag_value, **kwargs): # noqa: E501
"""Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
else:
(data) = self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data | Remove a tag from a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread. |
async def write(self, item):
"""
Write an item in the queue.
:param item: The item.
"""
await self._queue.put(item)
self._can_read.set()
if self._queue.full():
self._can_write.clear() | Write an item in the queue.
:param item: The item. |
def byname(nlist):
"""
**Deprecated:** Convert a list of named objects into an ordered dictionary
indexed by name.
This function is internal and has been deprecated in pywbem 0.12.
"""
warnings.warn("The internal byname() function has been deprecated, with "
"no replacement.", DeprecationWarning,
stacklevel=_stacklevel_above_module(__name__))
return OrderedDict([(x.name, x) for x in nlist]) | **Deprecated:** Convert a list of named objects into an ordered dictionary
indexed by name.
This function is internal and has been deprecated in pywbem 0.12. |
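A small illustration of the mapping byname() builds, using hypothetical named tuples (the function only requires objects with a .name attribute); note the call also emits the DeprecationWarning described above:
from collections import namedtuple
Named = namedtuple('Named', ['name', 'value'])
nlist = [Named('Width', 10), Named('Height', 20)]
# byname(nlist) == OrderedDict([('Width', Named('Width', 10)), ('Height', Named('Height', 20))])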
def iter_chunks_class(self):
"""
Yield each readable chunk present in the region.
Chunks that can not be read for whatever reason are silently skipped.
This function returns a :class:`nbt.chunk.Chunk` instance.
"""
for m in self.get_metadata():
try:
yield self.chunkclass(self.get_chunk(m.x, m.z))
except RegionFileFormatError:
pass | Yield each readable chunk present in the region.
Chunks that can not be read for whatever reason are silently skipped.
This function returns a :class:`nbt.chunk.Chunk` instance. |
def _win32_read_junction(path):
"""
Returns the location that the junction points to, raises ValueError if path is
not a junction.
CommandLine:
python -m ubelt._win32_links _win32_read_junction
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
>>> ub.delete(root)
>>> ub.ensuredir(root)
>>> dpath = join(root, 'dpath')
>>> djunc = join(root, 'djunc')
>>> ub.ensuredir(dpath)
>>> _win32_junction(dpath, djunc)
>>> path = djunc
>>> pointed = _win32_read_junction(path)
>>> print('pointed = {!r}'.format(pointed))
"""
if not jwfs.is_reparse_point(path):
raise ValueError('not a junction')
# --- Older version based on using shell commands ---
# if not exists(path):
# if six.PY2:
# raise OSError('Cannot find path={}'.format(path))
# else:
# raise FileNotFoundError('Cannot find path={}'.format(path))
# target_name = os.path.basename(path)
# for type_or_size, name, pointed in _win32_dir(path, '*'):
# if type_or_size == '<JUNCTION>' and name == target_name:
# return pointed
# raise ValueError('not a junction')
# new version using the windows api
handle = jwfs.api.CreateFile(
path, 0, 0, None, jwfs.api.OPEN_EXISTING,
jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT |
jwfs.api.FILE_FLAG_BACKUP_SEMANTICS,
None)
if handle == jwfs.api.INVALID_HANDLE_VALUE:
raise WindowsError()
res = jwfs.reparse.DeviceIoControl(
handle, jwfs.api.FSCTL_GET_REPARSE_POINT, None, 10240)
bytes = jwfs.create_string_buffer(res)
p_rdb = jwfs.cast(bytes, jwfs.POINTER(jwfs.api.REPARSE_DATA_BUFFER))
rdb = p_rdb.contents
if rdb.tag not in [2684354563, jwfs.api.IO_REPARSE_TAG_SYMLINK]:
raise RuntimeError(
"Expected <2684354563 or 2684354572>, but got %d" % rdb.tag)
jwfs.handle_nonzero_success(jwfs.api.CloseHandle(handle))
subname = rdb.get_substitute_name()
# probably has something to do with long paths, not sure
if subname.startswith('?\\'):
subname = subname[2:]
return subname | Returns the location that the junction points to, raises ValueError if path is
not a junction.
CommandLine:
python -m ubelt._win32_links _win32_read_junction
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')
>>> ub.delete(root)
>>> ub.ensuredir(root)
>>> dpath = join(root, 'dpath')
>>> djunc = join(root, 'djunc')
>>> ub.ensuredir(dpath)
>>> _win32_junction(dpath, djunc)
>>> path = djunc
>>> pointed = _win32_read_junction(path)
>>> print('pointed = {!r}'.format(pointed)) |
def from_json_file(file: TextIO, check_version=True) -> BELGraph:
"""Build a graph from the Node-Link JSON contained in the given file."""
graph_json_dict = json.load(file)
return from_json(graph_json_dict, check_version=check_version) | Build a graph from the Node-Link JSON contained in the given file. |
def _generic_signal_handler(self, signal_type):
"""
Function for handling both SIGTERM and SIGINT
"""
print("</pre>")
message = "Got " + signal_type + ". Failing gracefully..."
self.timestamp(message)
self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
sys.exit(1) | Function for handling both SIGTERM and SIGINT |
def stop_playback(self):
"""Stop playback from the audio sink."""
self._sink.flush()
self._sink.stop()
self._playing = False | Stop playback from the audio sink. |
def __setAsOrphaned(self):
"""
Sets the current model as orphaned. This is called when the scheduler is
about to kill the process to reallocate the worker to a different process.
"""
cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
cmplMessage = "Killed by Scheduler"
self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage) | Sets the current model as orphaned. This is called when the scheduler is
about to kill the process to reallocate the worker to a different process. |
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
for t in targets:
self.session.send(self._control_socket, 'abort_request',
content=content, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error | Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs. |
def clean_egginfo(self):
"""Clean .egginfo directory"""
dir_name = os.path.join(self.root, self.get_egginfo_dir())
self._clean_directory(dir_name) | Clean .egginfo directory |
def Run(self):
"Execute the action"
inputs = self.GetInput()
return SendInput(
len(inputs),
ctypes.byref(inputs),
ctypes.sizeof(INPUT)) | Execute the action |
def FindEnumTypeByName(self, full_name):
"""Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._enum_descriptors:
self.FindFileContainingSymbol(full_name)
return self._enum_descriptors[full_name] | Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type. |
def _scipy_distribution_positional_args_from_dict(distribution, params):
"""Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stats.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
"""
params['loc'] = params.get('loc', 0)
if 'scale' not in params:
params['scale'] = 1
if distribution == 'norm':
return params['mean'], params['std_dev']
elif distribution == 'beta':
return params['alpha'], params['beta'], params['loc'], params['scale']
elif distribution == 'gamma':
return params['alpha'], params['loc'], params['scale']
# elif distribution == 'poisson':
# return params['lambda'], params['loc']
elif distribution == 'uniform':
return params['min'], params['max']
elif distribution == 'chi2':
return params['df'], params['loc'], params['scale']
elif distribution == 'expon':
return params['loc'], params['scale'] | Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stats.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided. |
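A hedged usage sketch: the tuple returned above is meant to be splatted into the matching scipy.stats cdf call (the 'mean'/'std_dev' keys follow the dict layout this helper expects):
from scipy import stats
args = _scipy_distribution_positional_args_from_dict('norm', {'mean': 0, 'std_dev': 1})
stats.norm.cdf(1.96, *args)   # ~0.975, i.e. norm.cdf(1.96, loc=0, scale=1)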
def source_start(base='', book_id='book'):
"""
chooses a starting source file in the 'base' directory for id = book_id
"""
repo_htm_path = "{book_id}-h/{book_id}-h.htm".format(book_id=book_id)
possible_paths = ["book.asciidoc",
repo_htm_path,
"{}-0.txt".format(book_id),
"{}-8.txt".format(book_id),
"{}.txt".format(book_id),
"{}-pdf.pdf".format(book_id),
]
# return the first match
for path in possible_paths:
fullpath = os.path.join(base, path)
if os.path.exists(fullpath):
return path
return None | chooses a starting source file in the 'base' directory for id = book_id |
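A hedged illustration with a hypothetical directory layout: if only '1342-0.txt' exists under base, the earlier candidates miss and that relative path is returned:
# with /tmp/books/1342-0.txt present and the other candidates absent:
source_start(base='/tmp/books', book_id='1342')   # -> '1342-0.txt'
# with no candidate present the function returns None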
def set_data(self, capacity, voltage=None,
capacity_label="q", voltage_label="v"
):
"""Set the data"""
logging.debug("setting data (capacity and voltage)")
if isinstance(capacity, pd.DataFrame):
logging.debug("recieved a pandas.DataFrame")
self.capacity = capacity[capacity_label]
self.voltage = capacity[voltage_label]
else:
assert len(capacity) == len(voltage)
self.capacity = capacity
self.voltage = voltage | Set the data |
def filter_kwargs(_function, *args, **kwargs):
"""Given a function and args and keyword args to pass to it, call the function
but using only the keyword arguments which it accepts. This is equivalent
to redefining the function with an additional \*\*kwargs to accept slop
keyword args.
If the target function already accepts \*\*kwargs parameters, no filtering
is performed.
Parameters
----------
_function : callable
Function to call. Can take in any number of args or kwargs
"""
if has_kwargs(_function):
return _function(*args, **kwargs)
# Get the list of function arguments
func_code = six.get_function_code(_function)
function_args = func_code.co_varnames[:func_code.co_argcount]
# Construct a dict of those kwargs which appear in the function
filtered_kwargs = {}
for kwarg, value in list(kwargs.items()):
if kwarg in function_args:
filtered_kwargs[kwarg] = value
# Call the function with the supplied args and the filtered kwarg dict
return _function(*args, **filtered_kwargs) | Given a function and args and keyword args to pass to it, call the function
but using only the keyword arguments which it accepts. This is equivalent
to redefining the function with an additional \*\*kwargs to accept slop
keyword args.
If the target function already accepts \*\*kwargs parameters, no filtering
is performed.
Parameters
----------
_function : callable
Function to call. Can take in any number of args or kwargs |
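A minimal sketch of the behaviour with a hypothetical target function: declared keyword arguments pass through, undeclared ones are silently dropped:
def greet(name, punctuation='!'):
    return 'Hello, ' + name + punctuation

filter_kwargs(greet, 'Ada', punctuation='?', verbose=True)   # 'Hello, Ada?' -- 'verbose' is discarded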
def stoch(df, window=14, d=3, k=3, fast=False):
"""
compute the n period stochastic oscillator
http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html
"""
my_df = pd.DataFrame(index=df.index)
my_df['rolling_max'] = df['high'].rolling(window).max()
my_df['rolling_min'] = df['low'].rolling(window).min()
my_df['fast_k'] = 100 * (df['close'] - my_df['rolling_min'])/(my_df['rolling_max'] - my_df['rolling_min'])
my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()
if fast:
return my_df.loc[:, ['fast_k', 'fast_d']]
my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()
my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()
return my_df.loc[:, ['slow_k', 'slow_d']] | compute the n period stochastic oscillator
http://excelta.blogspot.co.il/2013/09/stochastic-oscillator-technical.html |
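A hedged usage sketch, assuming the function above is in scope and the DataFrame has 'high', 'low' and 'close' columns with enough rows for the rolling windows:
import numpy as np
import pandas as pd

prices = pd.DataFrame({
    'high':  np.linspace(101, 120, 20),
    'low':   np.linspace(99, 118, 20),
    'close': np.linspace(100, 119, 20),
})
stoch(prices, window=14, fast=True).tail()   # fast_k / fast_d columns for the last rows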
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [os.path.join(self.si.NetFxSdkDir, r'include\um')] | Microsoft .Net Framework SDK Includes |
def parallel_part(data, parallel):
"""parallel_part(data, parallel) -> part
Splits off samples from the given data list based on the given number of parallel jobs and the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts into which the data should be split
**Returns:**
``part`` : [object]
The desired partition of the ``data``
"""
if parallel is None or "SGE_TASK_ID" not in os.environ:
return data
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
task_id = int(os.environ['SGE_TASK_ID'])
first = (task_id-1) * data_per_job
last = min(len(data), task_id * data_per_job)
return data[first:last] | parallel_part(data, parallel) -> part
Splits off samples from the given data list based on the given number of parallel jobs and the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts into which the data should be split
**Returns:**
``part`` : [object]
The desired partition of the ``data`` |
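A short worked example of the slicing arithmetic (SGE_TASK_ID set by hand for illustration): 10 items over 3 parallel jobs gives ceil(10/3) = 4 items per job, so task 2 receives indices 4-7:
import math, os
os.environ['SGE_TASK_ID'] = '2'
data = list(range(10))
per_job = int(math.ceil(len(data) / 3.0))                            # 4
task_id = int(os.environ['SGE_TASK_ID'])
data[(task_id - 1) * per_job : min(len(data), task_id * per_job)]    # [4, 5, 6, 7]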
def hist(sample, options={}, **kwargs):
"""Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'counts'
is required for that mark, options['counts'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'counts' is
required for that mark, axes_options['counts'] contains optional
keyword arguments for the constructor of the corresponding axis type.
"""
kwargs['sample'] = sample
scales = kwargs.pop('scales', {})
if 'count' not in scales:
dimension = _get_attribute_dimension('count', Hist)
if dimension in _context['scales']:
scales['count'] = _context['scales'][dimension]
else:
scales['count'] = LinearScale(**options.get('count', {}))
_context['scales'][dimension] = scales['count']
kwargs['scales'] = scales
return _draw_mark(Hist, options=options, **kwargs) | Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'counts'
is required for that mark, options['counts'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'counts' is
required for that mark, axes_options['counts'] contains optional
keyword arguments for the constructor of the corresponding axis type. |
def has_started(self):
"""
Whether the handler has completed all start up processes such as
establishing the connection, session, link and authentication, and
is now ready to process messages.
**This function is now deprecated and will be removed in v2.0+.**
:rtype: bool
"""
# pylint: disable=protected-access
timeout = False
auth_in_progress = False
if self._handler._connection.cbs:
timeout, auth_in_progress = self._handler._auth.handle_token()
if timeout:
raise EventHubError("Authorization timeout.")
if auth_in_progress:
return False
if not self._handler._client_ready():
return False
return True | Whether the handler has completed all start up processes such as
establishing the connection, session, link and authentication, and
is now ready to process messages.
**This function is now deprecated and will be removed in v2.0+.**
:rtype: bool |
def keyword(
name: str,
ns: Optional[str] = None,
kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN,
) -> Keyword:
"""Create a new keyword."""
h = hash((name, ns))
return kw_cache.swap(__get_or_create, h, name, ns)[h] | Create a new keyword. |
async def edit_2fa(
self, current_password=None, new_password=None,
*, hint='', email=None, email_code_callback=None):
"""
Changes the 2FA settings of the logged in user, according to the
passed parameters. Take note of the parameter explanations.
Note that this method may be *incredibly* slow depending on the
prime numbers that must be used during the process to make sure
that everything is safe.
Has no effect if both current and new password are omitted.
current_password (`str`, optional):
The current password, to authorize changing to ``new_password``.
Must be set if changing existing 2FA settings.
Must **not** be set if 2FA is currently disabled.
Passing this by itself will remove 2FA (if correct).
new_password (`str`, optional):
The password to set as 2FA.
If 2FA was already enabled, ``current_password`` **must** be set.
Leaving this blank or ``None`` will remove the password.
hint (`str`, optional):
Hint to be displayed by Telegram when it asks for 2FA.
Leaving unspecified is highly discouraged.
Has no effect if ``new_password`` is not set.
email (`str`, optional):
Recovery and verification email. If present, you must also
set `email_code_callback`, else it raises ``ValueError``.
email_code_callback (`callable`, optional):
If an email is provided, a callback that returns the code sent
to it must also be set. This callback may be asynchronous.
It should return a string with the code. The length of the
code will be passed to the callback as an input parameter.
If the callback returns an invalid code, it will raise
``CodeInvalidError``.
Returns:
``True`` if successful, ``False`` otherwise.
"""
if new_password is None and current_password is None:
return False
if email and not callable(email_code_callback):
raise ValueError('email present without email_code_callback')
pwd = await self(functions.account.GetPasswordRequest())
pwd.new_algo.salt1 += os.urandom(32)
assert isinstance(pwd, types.account.Password)
if not pwd.has_password and current_password:
current_password = None
if current_password:
password = pwd_mod.compute_check(pwd, current_password)
else:
password = types.InputCheckPasswordEmpty()
if new_password:
new_password_hash = pwd_mod.compute_digest(
pwd.new_algo, new_password)
else:
new_password_hash = b''
try:
await self(functions.account.UpdatePasswordSettingsRequest(
password=password,
new_settings=types.account.PasswordInputSettings(
new_algo=pwd.new_algo,
new_password_hash=new_password_hash,
hint=hint,
email=email,
new_secure_settings=None
)
))
except errors.EmailUnconfirmedError as e:
code = email_code_callback(e.code_length)
if inspect.isawaitable(code):
code = await code
code = str(code)
await self(functions.account.ConfirmPasswordEmailRequest(code))
return True | Changes the 2FA settings of the logged in user, according to the
passed parameters. Take note of the parameter explanations.
Note that this method may be *incredibly* slow depending on the
prime numbers that must be used during the process to make sure
that everything is safe.
Has no effect if both current and new password are omitted.
current_password (`str`, optional):
The current password, to authorize changing to ``new_password``.
Must be set if changing existing 2FA settings.
Must **not** be set if 2FA is currently disabled.
Passing this by itself will remove 2FA (if correct).
new_password (`str`, optional):
The password to set as 2FA.
If 2FA was already enabled, ``current_password`` **must** be set.
Leaving this blank or ``None`` will remove the password.
hint (`str`, optional):
Hint to be displayed by Telegram when it asks for 2FA.
Leaving unspecified is highly discouraged.
Has no effect if ``new_password`` is not set.
email (`str`, optional):
Recovery and verification email. If present, you must also
set `email_code_callback`, else it raises ``ValueError``.
email_code_callback (`callable`, optional):
If an email is provided, a callback that returns the code sent
to it must also be set. This callback may be asynchronous.
It should return a string with the code. The length of the
code will be passed to the callback as an input parameter.
If the callback returns an invalid code, it will raise
``CodeInvalidError``.
Returns:
``True`` if successful, ``False`` otherwise. |
def _init(self):
"""Initialize layer structure."""
group_stack = [self]
clip_stack = []
last_layer = None
for record, channels in self._record._iter_layers():
current_group = group_stack[-1]
blocks = record.tagged_blocks
end_of_group = False
divider = blocks.get_data('SECTION_DIVIDER_SETTING', None)
divider = blocks.get_data('NESTED_SECTION_DIVIDER_SETTING',
divider)
if divider is not None:
if divider.kind == SectionDivider.BOUNDING_SECTION_DIVIDER:
layer = Group(self, None, None, current_group)
group_stack.append(layer)
elif divider.kind in (SectionDivider.OPEN_FOLDER,
SectionDivider.CLOSED_FOLDER):
layer = group_stack.pop()
assert layer is not self
layer._record = record
layer._channels = channels
for key in (
'ARTBOARD_DATA1', 'ARTBOARD_DATA2', 'ARTBOARD_DATA3'
):
if key in blocks:
layer = Artboard._move(layer)
end_of_group = True
elif (
'TYPE_TOOL_OBJECT_SETTING' in blocks or
'TYPE_TOOL_INFO' in blocks
):
layer = TypeLayer(self, record, channels, current_group)
elif (
record.flags.pixel_data_irrelevant and (
'VECTOR_ORIGINATION_DATA' in blocks or
'VECTOR_MASK_SETTING1' in blocks or
'VECTOR_MASK_SETTING2' in blocks or
'VECTOR_STROKE_DATA' in blocks or
'VECTOR_STROKE_CONTENT_DATA' in blocks
)
):
layer = ShapeLayer(self, record, channels, current_group)
elif (
'SMART_OBJECT_LAYER_DATA1' in blocks or
'SMART_OBJECT_LAYER_DATA2' in blocks or
'PLACED_LAYER1' in blocks or
'PLACED_LAYER2' in blocks
):
layer = SmartObjectLayer(self, record, channels,
current_group)
else:
layer = None
for key in adjustments.TYPES.keys():
if key in blocks:
layer = adjustments.TYPES[key](
self, record, channels, current_group
)
break
# If nothing applies, this is a pixel layer.
if layer is None:
layer = PixelLayer(
self, record, channels, current_group
)
if record.clipping == Clipping.NON_BASE:
clip_stack.append(layer)
else:
if clip_stack:
last_layer._clip_layers = clip_stack
clip_stack = []
if not end_of_group:
current_group._layers.append(layer)
last_layer = layer
if clip_stack and last_layer:
last_layer._clip_layers = clip_stack | Initialize layer structure. |
def run_one(self, set_title=False):
'''Get exactly one job, run it, and return.
Does nothing (but returns :const:`False`) if there is no work
to do. Ignores the global mode; this will do work even
if :func:`rejester.TaskMaster.get_mode` returns
:attr:`~rejester.TaskMaster.TERMINATE`.
:param set_title: if true, set the process's title with the
work unit name
:return: :const:`True` if there was a job (even if it failed)
'''
available_gb = MultiWorker.available_gb()
unit = self.task_master.get_work(self.worker_id, available_gb, work_spec_names=self.work_spec_names, max_jobs=self.max_jobs)
if not unit:
logger.info('No work to do; stopping.')
return False
if isinstance(unit, (list, tuple)):
ok = True
for xunit in unit:
if not ok:
try:
xunit.update(-1)
except LostLease as e:
pass
except Exception as bad:
# we're already quitting everything, but this is weirdly bad.
logger.error('failed to release lease on %r %r', xunit.work_spec_name, xunit.key, exc_info=True)
else:
ok = self._run_unit(xunit, set_title)
return ok
return self._run_unit(unit) | Get exactly one job, run it, and return.
Does nothing (but returns :const:`False`) if there is no work
to do. Ignores the global mode; this will do work even
if :func:`rejester.TaskMaster.get_mode` returns
:attr:`~rejester.TaskMaster.TERMINATE`.
:param set_title: if true, set the process's title with the
work unit name
:return: :const:`True` if there was a job (even if it failed) |
def encode(self, word, max_length=-1, keep_vowels=False, vowel_char='*'):
r"""Return the Dolby Code of a name.
Parameters
----------
word : str
The word to transform
max_length : int
Maximum length of the returned Dolby code -- this also activates
the fixed-length code mode if it is greater than 0
keep_vowels : bool
If True, retains all vowel markers
vowel_char : str
The vowel marker character (defaults to \*)
Returns
-------
str
The Dolby Code
Examples
--------
>>> pe = Dolby()
>>> pe.encode('Hansen')
'H*NSN'
>>> pe.encode('Larsen')
'L*RSN'
>>> pe.encode('Aagaard')
'*GR'
>>> pe.encode('Braaten')
'BR*DN'
>>> pe.encode('Sandvik')
'S*NVK'
>>> pe.encode('Hansen', max_length=6)
'H*NS*N'
>>> pe.encode('Larsen', max_length=6)
'L*RS*N'
>>> pe.encode('Aagaard', max_length=6)
'*G*R '
>>> pe.encode('Braaten', max_length=6)
'BR*D*N'
>>> pe.encode('Sandvik', max_length=6)
'S*NF*K'
>>> pe.encode('Smith')
'SM*D'
>>> pe.encode('Waters')
'W*DRS'
>>> pe.encode('James')
'J*MS'
>>> pe.encode('Schmidt')
'SM*D'
>>> pe.encode('Ashcroft')
'*SKRFD'
>>> pe.encode('Smith', max_length=6)
'SM*D '
>>> pe.encode('Waters', max_length=6)
'W*D*RS'
>>> pe.encode('James', max_length=6)
'J*M*S '
>>> pe.encode('Schmidt', max_length=6)
'SM*D '
>>> pe.encode('Ashcroft', max_length=6)
'*SKRFD'
"""
# uppercase, normalize, decompose, and filter non-A-Z out
word = unicode_normalize('NFKD', text_type(word.upper()))
word = word.replace('ß', 'SS')
word = ''.join(c for c in word if c in self._uc_set)
# Rule 1 (FL2)
if word[:3] in {'MCG', 'MAG', 'MAC'}:
word = 'MK' + word[3:]
elif word[:2] == 'MC':
word = 'MK' + word[2:]
# Rule 2 (FL3)
pos = len(word) - 2
while pos > -1:
if word[pos : pos + 2] in {
'DT',
'LD',
'ND',
'NT',
'RC',
'RD',
'RT',
'SC',
'SK',
'ST',
}:
word = word[: pos + 1] + word[pos + 2 :]
pos += 1
pos -= 1
# Rule 3 (FL4)
# Although the rule indicates "after the first letter", the test cases
# make it clear that these apply to the first letter also.
word = word.replace('X', 'KS')
word = word.replace('CE', 'SE')
word = word.replace('CI', 'SI')
word = word.replace('CY', 'SI')
# not in the rule set, but they seem to have intended it
word = word.replace('TCH', 'CH')
pos = word.find('CH', 1)
while pos != -1:
if word[pos - 1 : pos] not in self._uc_vy_set:
word = word[:pos] + 'S' + word[pos + 1 :]
pos = word.find('CH', pos + 1)
word = word.replace('C', 'K')
word = word.replace('Z', 'S')
word = word.replace('WR', 'R')
word = word.replace('DG', 'G')
word = word.replace('QU', 'K')
word = word.replace('T', 'D')
word = word.replace('PH', 'F')
# Rule 4 (FL5)
# Although the rule indicates "after the first letter", the test cases
# make it clear that these apply to the first letter also.
pos = word.find('K', 0)
while pos != -1:
if pos > 1 and word[pos - 1 : pos] not in self._uc_vy_set | {
'L',
'N',
'R',
}:
word = word[: pos - 1] + word[pos:]
pos -= 1
pos = word.find('K', pos + 1)
# Rule FL6
if max_length > 0 and word[-1:] == 'E':
word = word[:-1]
# Rule 5 (FL7)
word = self._delete_consecutive_repeats(word)
# Rule 6 (FL8)
if word[:2] == 'PF':
word = word[1:]
if word[-2:] == 'PF':
word = word[:-1]
elif word[-2:] == 'GH':
if word[-3:-2] in self._uc_vy_set:
word = word[:-2] + 'F'
else:
word = word[:-2] + 'G'
word = word.replace('GH', '')
# Rule FL9
if max_length > 0:
word = word.replace('V', 'F')
# Rules 7-9 (FL10-FL12)
first = 1 + (1 if max_length > 0 else 0)
code = ''
for pos, char in enumerate(word):
if char in self._uc_vy_set:
if first or keep_vowels:
code += vowel_char
first -= 1
elif pos > 0 and char in {'W', 'H'}:
continue
else:
code += char
if max_length > 0:
# Rule FL13
if len(code) > max_length and code[-1:] == 'S':
code = code[:-1]
if keep_vowels:
code = code[:max_length]
else:
# Rule FL14
code = code[: max_length + 2]
# Rule FL15
while len(code) > max_length:
vowels = len(code) - max_length
excess = vowels - 1
word = code
code = ''
for char in word:
if char == vowel_char:
if vowels:
code += char
vowels -= 1
else:
code += char
code = code[: max_length + excess]
# Rule FL16
code += ' ' * (max_length - len(code))
return code | r"""Return the Dolby Code of a name.
Parameters
----------
word : str
The word to transform
max_length : int
Maximum length of the returned Dolby code -- this also activates
the fixed-length code mode if it is greater than 0
keep_vowels : bool
If True, retains all vowel markers
vowel_char : str
The vowel marker character (defaults to \*)
Returns
-------
str
The Dolby Code
Examples
--------
>>> pe = Dolby()
>>> pe.encode('Hansen')
'H*NSN'
>>> pe.encode('Larsen')
'L*RSN'
>>> pe.encode('Aagaard')
'*GR'
>>> pe.encode('Braaten')
'BR*DN'
>>> pe.encode('Sandvik')
'S*NVK'
>>> pe.encode('Hansen', max_length=6)
'H*NS*N'
>>> pe.encode('Larsen', max_length=6)
'L*RS*N'
>>> pe.encode('Aagaard', max_length=6)
'*G*R '
>>> pe.encode('Braaten', max_length=6)
'BR*D*N'
>>> pe.encode('Sandvik', max_length=6)
'S*NF*K'
>>> pe.encode('Smith')
'SM*D'
>>> pe.encode('Waters')
'W*DRS'
>>> pe.encode('James')
'J*MS'
>>> pe.encode('Schmidt')
'SM*D'
>>> pe.encode('Ashcroft')
'*SKRFD'
>>> pe.encode('Smith', max_length=6)
'SM*D '
>>> pe.encode('Waters', max_length=6)
'W*D*RS'
>>> pe.encode('James', max_length=6)
'J*M*S '
>>> pe.encode('Schmidt', max_length=6)
'SM*D '
>>> pe.encode('Ashcroft', max_length=6)
'*SKRFD' |
def insert_before(self, value: Union[RawValue, Value],
raw: bool = False) -> "ArrayEntry":
"""Insert a new entry before the receiver.
Args:
value: The value of the new entry.
raw: Flag to be set if `value` is raw.
Returns:
An instance node of the new inserted entry.
"""
return ArrayEntry(self.index, self.before, self.after.cons(self.value),
self._cook_value(value, raw), self.parinst,
self.schema_node, datetime.now()) | Insert a new entry before the receiver.
Args:
value: The value of the new entry.
raw: Flag to be set if `value` is raw.
Returns:
An instance node of the new inserted entry. |
def fetch_credential(self, credential=None, profile=None):
"""Fetch credential from credentials file.
Args:
credential (str): Credential to fetch.
profile (str): Credentials profile. Defaults to ``'default'``.
Returns:
str, None: Fetched credential or ``None``.
"""
q = self.db.get(self.query.profile == profile)
if q is not None:
return q.get(credential) | Fetch credential from credentials file.
Args:
credential (str): Credential to fetch.
profile (str): Credentials profile. Defaults to ``'default'``.
Returns:
str, None: Fetched credential or ``None``. |
def flow_ramp(self):
"""An equally spaced array representing flow at each row."""
return np.linspace(1 / self.n_rows, 1, self.n_rows)*self.q | An equally spaced array representing flow at each row. |
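A one-line worked example of the ramp, assuming n_rows = 4 and q = 8: the per-row flow climbs in equal steps up to q:
import numpy as np
np.linspace(0.25, 1, 4) * 8   # array([2., 4., 6., 8.])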
def read_certificate_signing_request(self, name, **kwargs): # noqa: E501
"""read_certificate_signing_request # noqa: E501
read the specified CertificateSigningRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_certificate_signing_request(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_certificate_signing_request_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_certificate_signing_request_with_http_info(name, **kwargs) # noqa: E501
return data | read_certificate_signing_request # noqa: E501
read the specified CertificateSigningRequest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_certificate_signing_request(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread. |
def remove_entity(self, name):
"""Unload an entity"""
self.entities.remove(name)
self.padaos.remove_entity(name) | Unload an entity |
def serialzeValueToTCL(self, val, do_eval=False) -> Tuple[str, str, bool]:
"""
:see: doc of method on parent class
"""
if isinstance(val, int):
val = hInt(val)
if do_eval:
val = val.staticEval()
if isinstance(val, RtlSignalBase):
ctx = VivadoTclExpressionSerializer.getBaseContext()
tclVal = VivadoTclExpressionSerializer.asHdl(val, ctx)
tclValVal = VivadoTclExpressionSerializer.asHdl(
val.staticEval())
return tclVal, tclValVal, False
else:
tclVal = VivadoTclExpressionSerializer.asHdl(val, None)
return tclVal, tclVal, True | :see: doc of method on parent class |
def set_app_id(self, id, version, icon):
'''Sets some meta-information about the application.
See also L{set_user_agent}().
@param id: Java-style application identifier, e.g. "com.acme.foobar".
@param version: application version numbers, e.g. "1.2.3".
@param icon: application icon name, e.g. "foobar".
@version: LibVLC 2.1.0 or later.
'''
return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon)) | Sets some meta-information about the application.
See also L{set_user_agent}().
@param id: Java-style application identifier, e.g. "com.acme.foobar".
@param version: application version numbers, e.g. "1.2.3".
@param icon: application icon name, e.g. "foobar".
@version: LibVLC 2.1.0 or later. |
def expect_re(regexp, buf, pos):
"""Require a regular expression at the current buffer position."""
match = regexp.match(buf, pos)
if not match:
return None, len(buf)
return buf[match.start(1):match.end(1)], match.end(0) | Require a regular expression at the current buffer position. |
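A minimal sketch of the contract with a hypothetical pattern: group 1 carries the token of interest, and the second return value is the position to resume from, or len(buf) when the match fails:
import re
word = re.compile(r'\s*(\w+)')
expect_re(word, '  hello world', 0)   # ('hello', 7)
expect_re(word, '  ...', 0)           # (None, 5)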
def get_queryset(self):
'''
If MultiTenantMiddleware is used, filter queryset by request.site_id
'''
queryset = super(PageList, self).get_queryset()
if hasattr(self.request, 'site_id'):
queryset = queryset.filter(site_id=self.request.site_id)
return queryset | If MultiTenantMiddleware is used, filter queryset by request.site_id |
def write(self, location=None):
"""
Write file to I/O backend.
"""
# Take location and expand tilde.
if location is not None:
self.location = location
assert self.location
# Find I/O backend that handles this location.
for io in self.editor.io_backends:
if io.can_open_location(self.location):
break
else:
self.editor.show_message('Unknown location: %r' % location)
# Write it.
try:
io.write(self.location, self.buffer.text + '\n', self.encoding)
self.is_new = False
except Exception as e:
# E.g. "No such file or directory."
self.editor.show_message('%s' % e)
else:
# When the save succeeds: update: _file_content.
self._file_content = self.buffer.text | Write file to I/O backend. |
def move_forward(columns=1, file=sys.stdout):
""" Move the cursor forward a number of columns.
Esc[<columns>C:
Moves the cursor forward by the specified number of columns without
changing lines. If the cursor is already in the rightmost column,
ANSI.SYS ignores this sequence.
"""
move.forward(columns).write(file=file) | Move the cursor forward a number of columns.
Esc[<columns>C:
Moves the cursor forward by the specified number of columns without
changing lines. If the cursor is already in the rightmost column,
ANSI.SYS ignores this sequence. |
def wait_until(predicate, success_description, timeout=10):
"""Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value.
"""
start = time.time()
while True:
retval = predicate()
if retval:
return retval
if time.time() - start > timeout:
raise AssertionError("Didn't ever %s" % success_description)
time.sleep(0.1) | Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value. |
def clear_all():
"""DANGER!
*This command is a maintenance tool and clears the complete database.*
"""
sure = input("Are you sure to drop the complete database content? (Type "
"in upppercase YES)")
if not (sure == 'YES'):
db_log('Not deleting the database.')
sys.exit(5)
client = pymongo.MongoClient(host=dbhost, port=dbport)
db = client[dbname]
for col in db.collection_names(include_system_collections=False):
db_log("Dropping collection ", col, lvl=warn)
db.drop_collection(col) | DANGER!
*This command is a maintenance tool and clears the complete database.* |
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss | Trains the model. |
def main_make_views(gtfs_fname):
"""Re-create all views.
"""
print("creating views")
conn = GTFS(fname_or_conn=gtfs_fname).conn
for L in Loaders:
L(None).make_views(conn)
conn.commit() | Re-create all views. |
async def get_blueprint_params(request, left: int, right: int) -> str:
"""
API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/).
"""
res = left * right
return "{left}*{right}={res}".format(left=left, right=right, res=res) | API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/). |
def lookup_defs(self, variable, size_threshold=32):
"""
Find all definitions of the varaible
:param SimVariable variable: The variable to lookup for.
:param int size_threshold: The maximum bytes to consider for the variable. For example, if the variable is 100
byte long, only the first `size_threshold` bytes are considered.
:return: A set of code locations where the variable is defined.
:rtype: set
"""
live_def_locs = set()
if isinstance(variable, SimRegisterVariable):
if variable.reg is None:
l.warning('lookup_defs: Got a None for a SimRegisterVariable. Consider fixing.')
return live_def_locs
size = min(variable.size, size_threshold)
offset = variable.reg
while offset < variable.reg + size:
if offset in self._register_map:
live_def_locs |= self._register_map[offset]
offset += 1
elif isinstance(variable, SimMemoryVariable):
size = min(variable.size, size_threshold)
offset = variable.addr
while offset < variable.addr + size:
if offset in self._memory_map:
live_def_locs |= self._memory_map[offset]
offset += 1
else:
# umm unsupported variable type
l.error('Unsupported variable type "%s".', type(variable))
return live_def_locs | Find all definitions of the variable
:param SimVariable variable: The variable to look up.
:param int size_threshold: The maximum bytes to consider for the variable. For example, if the variable is 100
byte long, only the first `size_threshold` bytes are considered.
:return: A set of code locations where the variable is defined.
:rtype: set |