code | docstring
---|---
def _toggle_filming(self):
"""Toggles the camera system recording state"""
if self._filming:
self.log("Stopping operation")
self._filming = False
self.timer.stop()
else:
self.log("Starting operation")
self._filming = True
self.timer.start() | Toggles the camera system recording state |
def reload(self):
""" Function reload
Sync the full object
"""
self.load(self.api.get(self.objName, self.key)) | Function reload
Sync the full object |
def move_tab(self, index_from, index_to):
"""Move tab."""
client = self.clients.pop(index_from)
self.clients.insert(index_to, client) | Move tab. |
def verify_registration(request):
"""
Verify registration via signature.
"""
user = process_verify_registration_data(request.data)
extra_data = None
if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN:
extra_data = perform_login(request, user)
return get_ok_response('User verified successfully', extra_data=extra_data) | Verify registration via signature. |
def with_setup(setup=None, teardown=None):
"""Decorator to add setup and/or teardown methods to a test function::
@with_setup(setup, teardown)
def test_something():
" ... "
Note that `with_setup` is useful *only* for test functions, not for test
methods or inside of TestCase subclasses.
"""
def decorate(func, setup=setup, teardown=teardown):
if setup:
if hasattr(func, 'setup'):
_old_s = func.setup
def _s():
setup()
_old_s()
func.setup = _s
else:
func.setup = setup
if teardown:
if hasattr(func, 'teardown'):
_old_t = func.teardown
def _t():
_old_t()
teardown()
func.teardown = _t
else:
func.teardown = teardown
return func
return decorate | Decorator to add setup and/or teardown methods to a test function::
@with_setup(setup, teardown)
def test_something():
" ... "
Note that `with_setup` is useful *only* for test functions, not for test
methods or inside of TestCase subclasses. |
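A minimal usage sketch of the decorator above; the fixture functions and the test body are hypothetical, shown only to illustrate how the attributes are attached for a nose-style runner:

calls = []

def make_fixture():
    calls.append("setup")

def drop_fixture():
    calls.append("teardown")

@with_setup(make_fixture, drop_fixture)
def test_something():
    calls.append("test")

# A nose-style runner would invoke these around the test function:
test_something.setup()
test_something()
test_something.teardown()
assert calls == ["setup", "test", "teardown"]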
def _check_success(self):
"""
Returns True if task is successfully completed
"""
# cube is higher than the table top above a margin
cube_height = self.sim.data.body_xpos[self.cube_body_id][2]
table_height = self.table_full_size[2]
return cube_height > table_height + 0.10 | Returns True if task is successfully completed |
def write_str(data, sidx, pnames):
""" Write STRUCTURE format for all SNPs and unlinked SNPs """
## grab snp and bis data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
snparr = io5["snparr"]
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
bend = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(bend):
bend = bend.min()
else:
bend = bisarr.shape[1]
send = np.where(np.all(snparr[:] == "", axis=0))[0]
if np.any(send):
send = send.min()
else:
send = snparr.shape[1]
## write to str and ustr
out1 = open(data.outfiles.str, 'w')
out2 = open(data.outfiles.ustr, 'w')
numdict = {'A': '0', 'T': '1', 'G': '2', 'C': '3', 'N': '-9', '-': '-9'}
if data.paramsdict["max_alleles_consens"] > 1:
for idx, name in enumerate(pnames):
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]])))
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][1]] for i in snparr[idx, :send]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][1]] for i in bisarr[idx, :bend]])))
else:
## haploid output
for idx, name in enumerate(pnames):
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]])))
out1.close()
out2.close()
LOGGER.debug("finished writing str in: %s", time.time() - start) | Write STRUCTURE format for all SNPs and unlinked SNPs |
def init(options, use_sigterm_handler=True):
"""
Must be called just after registration, before anything else
"""
# pylint: disable-msg=W0613
global _AUTH, _OPTIONS
if isinstance(options, dict):
_OPTIONS = DEFAULT_OPTIONS.copy()
_OPTIONS.update(options)
else:
for optname, optvalue in DEFAULT_OPTIONS.iteritems():
if hasattr(options, optname):
_OPTIONS[optname] = getattr(options, optname)
else:
_OPTIONS[optname] = optvalue
if _OPTIONS['testmethods']:
def fortytwo(request):
"test GET method"
return 42
def ping(request):
"test POST method"
return request.payload_params()
register(fortytwo, 'GET')
register(ping, 'POST')
if _OPTIONS['auth_basic_file']:
_AUTH = HttpAuthentication(_OPTIONS['auth_basic_file'],
realm = _OPTIONS['auth_basic']).parse_file()
for name, cmd in _COMMANDS.iteritems():
if cmd.safe_init:
LOG.info("safe_init: %r", name)
cmd.safe_init(_OPTIONS)
if use_sigterm_handler:
# signal.signal(signal.SIGHUP, lambda *x: None) # XXX
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler) | Must be called just after registration, before anything else |
def matches_filters(self, node):
"""
Returns whether the given node matches all filters.
Args:
node (Element): The node to evaluate.
Returns:
bool: Whether the given node matches.
"""
visible = self.visible
if self.options["text"]:
if isregex(self.options["text"]):
regex = self.options["text"]
elif self.exact_text is True:
regex = re.compile(r"\A{}\Z".format(re.escape(self.options["text"])))
else:
regex = toregex(self.options["text"])
text = normalize_text(
node.all_text if visible == "all" else node.visible_text)
if not regex.search(text):
return False
if isinstance(self.exact_text, (bytes_, str_)):
regex = re.compile(r"\A{}\Z".format(re.escape(self.exact_text)))
text = normalize_text(
node.all_text if visible == "all" else node.visible_text)
if not regex.search(text):
return False
if visible == "visible":
if not node.visible:
return False
elif visible == "hidden":
if node.visible:
return False
for name, node_filter in iter(self._node_filters.items()):
if name in self.filter_options:
if not node_filter.matches(node, self.filter_options[name]):
return False
elif node_filter.has_default:
if not node_filter.matches(node, node_filter.default):
return False
if self.options["filter"] and not self.options["filter"](node):
return False
return True | Returns whether the given node matches all filters.
Args:
node (Element): The node to evaluate.
Returns:
bool: Whether the given node matches. |
def create_LM_hashed_password_v1(passwd):
"""create LanManager hashed password"""
# if the passwd provided is already a hash, we just return the first half
if re.match(r'^[\w]{32}:[\w]{32}$', passwd):
return binascii.unhexlify(passwd.split(':')[0])
# fix the password length to 14 bytes
passwd = passwd.upper()
lm_pw = passwd + '\0' * (14 - len(passwd))
lm_pw = lm_pw[0:14]  # keep the NUL padding and truncate long passwords to 14 chars
# do hash
magic_str = b"KGS!@#$%" # page 57 in [MS-NLMP]
res = b''
dobj = des.DES(lm_pw[0:7])
res = res + dobj.encrypt(magic_str)
dobj = des.DES(lm_pw[7:14])
res = res + dobj.encrypt(magic_str)
return res | create LanManager hashed password |
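A small illustration of the pad-and-split step above, independent of the DES dependency (the password value is hypothetical):

passwd = "SECRET"                         # hypothetical plain-text password
lm_pw = (passwd.upper() + '\0' * 14)[:14]  # equivalent pad-then-truncate to 14 chars
assert len(lm_pw) == 14
half1, half2 = lm_pw[0:7], lm_pw[7:14]     # the two 7-char halves fed to DES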
def get_pages_for_display(self):
"""Return all pages needed for rendering all sub-levels for the current
menu"""
# Start with an empty queryset, and expand as needed
all_pages = Page.objects.none()
if self.max_levels == 1:
# If no additional sub-levels are needed, return empty queryset
return all_pages
for item in self.top_level_items:
if item.link_page_id:
# Fetch a 'branch' of suitable descendants for this item and
# add to 'all_pages'
page_depth = item.link_page.depth
if(
item.allow_subnav and
page_depth >= settings.SECTION_ROOT_DEPTH
):
all_pages = all_pages | Page.objects.filter(
depth__gt=page_depth,
depth__lt=page_depth + self.max_levels,
path__startswith=item.link_page.path)
# Filter the entire queryset to include only pages suitable for display
all_pages = all_pages & self.get_base_page_queryset()
# Return 'specific' page instances if required
if self.use_specific == constants.USE_SPECIFIC_ALWAYS:
return all_pages.specific()
return all_pages | Return all pages needed for rendering all sub-levels for the current
menu |
def dot(self, other_tf):
"""Compose this simliarity transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`SimilarityTransform`
The other SimilarityTransform to compose with this one.
Returns
-------
:obj:`SimilarityTransform`
A SimilarityTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
"""
if other_tf.to_frame != self.from_frame:
raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
if not isinstance(other_tf, RigidTransform):
raise ValueError('Can only compose with other RigidTransform classes')
other_scale = 1.0
if isinstance(other_tf, SimilarityTransform):
other_scale = other_tf.scale
rotation = self.rotation.dot(other_tf.rotation)
translation = self.translation + self.scale * self.rotation.dot(other_tf.translation)
scale = self.scale * other_scale
return SimilarityTransform(rotation, translation, scale,
from_frame=other_tf.from_frame,
to_frame=self.to_frame) | Compose this similarity transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`SimilarityTransform`
The other SimilarityTransform to compose with this one.
Returns
-------
:obj:`SimilarityTransform`
A SimilarityTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame. |
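A plain-numpy sketch of the composition rule implemented above (arbitrary example values, not the library's API):

import numpy as np

R1, t1, s1 = np.eye(3), np.array([1.0, 0.0, 0.0]), 2.0   # left transform
R2, t2, s2 = np.eye(3), np.array([0.0, 1.0, 0.0]), 0.5   # right transform

R = R1.dot(R2)                # composed rotation
t = t1 + s1 * R1.dot(t2)      # composed translation
s = s1 * s2                   # composed scale

# Applying (R, t, s) to a point x equals applying the right transform first,
# then the left: s1 * R1.dot(s2 * R2.dot(x) + t2) + t1
x = np.array([1.0, 2.0, 3.0])
assert np.allclose(s * R.dot(x) + t, s1 * R1.dot(s2 * R2.dot(x) + t2) + t1)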
def event_text_key(self, event):
"""
So a "invert shift" for user inputs:
Convert all lowercase letters to uppercase and vice versa.
"""
char = event.char
if not char or char not in string.ascii_letters:
# ignore all non letter inputs
return
converted_char = invert_shift(char)
log.debug("convert keycode %s - char %s to %s", event.keycode, repr(char), converted_char)
# self.text.delete(Tkinter.INSERT + "-1c") # Delete last input char
self.text.insert(tkinter.INSERT, converted_char) # Insert converted char
return "break" | So a "invert shift" for user inputs:
Convert all lowercase letters to uppercase and vice versa. |
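invert_shift is defined elsewhere in that project; for single ASCII letters it presumably behaves like str.swapcase. A hedged stand-in, for illustration only:

import string

def invert_shift(char):
    # assumed behaviour: swap the case of a single letter
    return char.swapcase()

assert invert_shift("a") == "A"
assert invert_shift("Q") == "q"
assert all(invert_shift(c) in string.ascii_letters for c in "aZ")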
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key_as_string') and self.key_as_string is not None:
_dict['key_as_string'] = datetime_to_string(self.key_as_string)
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'event_rate') and self.event_rate is not None:
_dict['event_rate'] = self.event_rate
return _dict | Return a json dictionary representing this model. |
def descend(self, include_me=True):
"""Descend depth first into all child nodes"""
if include_me:
yield self
for child in self.child_list:
# child.descend() already yields the child node itself, so don't yield it twice
yield from child.descend() | Descend depth first into all child nodes |
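A minimal usage sketch with a hypothetical node class that exposes child_list and carries the generator above as a method:

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.child_list = list(children)

    def descend(self, include_me=True):
        if include_me:
            yield self
        for child in self.child_list:
            yield from child.descend()

tree = Node("root", [Node("a", [Node("a1")]), Node("b")])
assert [n.name for n in tree.descend()] == ["root", "a", "a1", "b"]
assert [n.name for n in tree.descend(include_me=False)] == ["a", "a1", "b"]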
def anderson(*args, dist='norm'):
"""Anderson-Darling test of distribution.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
dist : string
Distribution ('norm', 'expon', 'logistic', 'gumbel')
Returns
-------
from_dist : boolean
True if data comes from this distribution.
sig_level : float
The significance levels for the corresponding critical values in %.
(See :py:func:`scipy.stats.anderson` for more details)
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4]
>>> anderson(x, dist='norm')
(False, 15.0)
2. Test that two arrays come from an exponential distribution
>>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2]
>>> anderson(x, y, dist='expon')
(array([False, False]), array([15., 15.]))
"""
from scipy.stats import anderson as ads
k = len(args)
from_dist = np.zeros(k, 'bool')
sig_level = np.zeros(k)
for j in range(k):
st, cr, sig = ads(args[j], dist=dist)
from_dist[j] = True if (st > cr).any() else False
sig_level[j] = sig[np.argmin(np.abs(st - cr))]
if k == 1:
from_dist = bool(from_dist)
sig_level = float(sig_level)
return from_dist, sig_level | Anderson-Darling test of distribution.
Parameters
----------
sample1, sample2,... : array_like
Array of sample data. May be different lengths.
dist : string
Distribution ('norm', 'expon', 'logistic', 'gumbel')
Returns
-------
from_dist : boolean
True if data comes from this distribution.
sig_level : float
The significance levels for the corresponding critical values in %.
(See :py:func:`scipy.stats.anderson` for more details)
Examples
--------
1. Test that an array comes from a normal distribution
>>> from pingouin import anderson
>>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4]
>>> anderson(x, dist='norm')
(False, 15.0)
2. Test that two arrays come from an exponential distribution
>>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2]
>>> anderson(x, y, dist='expon')
(array([False, False]), array([15., 15.])) |
def request(self, method, api_url, params={}, **kwargs):
"""Generate the API call to the device."""
LOG.debug("axapi_http: full url = %s", self.url_base + api_url)
LOG.debug("axapi_http: %s url = %s", method, api_url)
LOG.debug("axapi_http: params = %s", json.dumps(logutils.clean(params), indent=4))
# Set "data" variable for the request
if params:
extra_params = kwargs.get('axapi_args', {})
params_copy = merge_dicts(params, extra_params)
LOG.debug("axapi_http: params_all = %s", logutils.clean(params_copy))
payload = json.dumps(params_copy)
else:
try:
payload = kwargs.pop('payload', None)
self.headers = dict(self.HEADERS, **kwargs.pop('headers', {}))
LOG.debug("axapi_http: headers_all = %s", logutils.clean(self.headers))
except KeyError:
payload = None
max_retries = kwargs.get('max_retries', self.max_retries)
timeout = kwargs.get('timeout', self.timeout)
# Create session to set HTTPAdapter or SSLAdapter
session = Session()
if self.port == 443:
# Add adapter for any https session to force TLS1_0 connection for v21 of AXAPI
session.mount('https://', SSLAdapter(max_retries=max_retries))
else:
session.mount('http://', HTTPAdapter(max_retries=max_retries))
session_request = getattr(session, method.lower())
# Make actual request and handle any errors
try:
device_response = session_request(
self.url_base + api_url, verify=False, data=payload, headers=self.HEADERS, timeout=timeout
)
except Exception as e:
LOG.error("acos_client failing with error %s after %s retries", e.__class__.__name__, max_retries)
raise e
finally:
session.close()
# Log if the response is one of the known broken responses
if device_response in broken_replies:
device_response = broken_replies[device_response]
LOG.debug("axapi_http: broken reply, new response: %s", logutils.clean(device_response))
# Validate json response
try:
json_response = device_response.json()
LOG.debug("axapi_http: data = %s", json.dumps(logutils.clean(json_response), indent=4))
except ValueError as e:
# The response is not JSON but it still succeeded.
LOG.debug("axapi_http: json = %s", e)
return device_response
# Handle "fail" responses returned by AXAPI
if 'response' in json_response and 'status' in json_response['response']:
if json_response['response']['status'] == 'fail':
acos_responses.raise_axapi_ex(json_response, action=extract_method(api_url))
# Return json portion of response
return json_response | Generate the API call to the device. |
def remove_port_channel(self, **kwargs):
"""
Remove a port channel interface.
Args:
port_int (str): port-channel number (1, 2, 3, etc).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `port_int` is not passed.
ValueError: if `port_int` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.channel_group(name='225/0/20',
... int_type='tengigabitethernet',
... port_int='1', channel_type='standard', mode='active')
... output = dev.interface.remove_port_channel(
... port_int='1')
"""
port_int = kwargs.pop('port_int')
callback = kwargs.pop('callback', self._callback)
if re.search('^[0-9]{1,4}$', port_int) is None:
raise ValueError('%s must be in the format of x for port channel '
'interfaces.' % repr(port_int))
port_channel = getattr(self._interface, 'interface_port_channel_name')
port_channel_args = dict(name=port_int)
config = port_channel(**port_channel_args)
delete_channel = config.find('.//*port-channel')
delete_channel.set('operation', 'delete')
return callback(config) | Remove a port channel interface.
Args:
port_int (str): port-channel number (1, 2, 3, etc).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `port_int` is not passed.
ValueError: if `port_int` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.channel_group(name='225/0/20',
... int_type='tengigabitethernet',
... port_int='1', channel_type='standard', mode='active')
... output = dev.interface.remove_port_channel(
... port_int='1') |
def add_segmented_colorbar(da, colors, direction):
"""
Add 'non-rastered' colorbar to DrawingArea
"""
nbreak = len(colors)
if direction == 'vertical':
linewidth = da.height/nbreak
verts = [None] * nbreak
x1, x2 = 0, da.width
for i, color in enumerate(colors):
y1 = i * linewidth
y2 = y1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
linewidth = da.width/nbreak
verts = [None] * nbreak
y1, y2 = 0, da.height
for i, color in enumerate(colors):
x1 = i * linewidth
x2 = x1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
coll = mcoll.PolyCollection(verts,
facecolors=colors,
linewidth=0,
antialiased=False)
da.add_artist(coll) | Add 'non-rastered' colorbar to DrawingArea |
def unpublish(self, daap_server):
"""
Unpublish a given server.
If the server was not published, this method will not do anything.
:param DAAPServer daap_server: DAAP Server instance to unpublish.
"""
if daap_server not in self.daap_servers:
return
self.zeroconf.unregister_service(self.daap_servers[daap_server])
del self.daap_servers[daap_server] | Unpublish a given server.
If the server was not published, this method will not do anything.
:param DAAPServer daap_server: DAAP Server instance to unpublish. |
def wc_dict2lha(wc, skip_redundant=True, skip_zero=True):
"""Convert a a dictionary of Wilson coefficients into
a dictionary that pylha can convert into a DSixTools WC output file."""
d = OrderedDict()
for name, (block, i) in WC_dict_0f.items():
if block not in d:
d[block] = defaultdict(list)
if wc[name] != 0:
d[block]['values'].append([i, wc[name].real])
for name in definitions.WC_keys_2f:
reblock = 'WC'+name.upper()
imblock = 'IMWC'+name.upper()
if reblock not in d:
d[reblock] = defaultdict(list)
if imblock not in d:
d[imblock] = defaultdict(list)
for i in range(3):
for j in range(3):
if (i, j) in definitions.redundant_elements[name] and skip_redundant:
# skip redundant elements
continue
if wc[name][i, j].real != 0 or not skip_zero:
d[reblock]['values'].append([i+1, j+1, float(wc[name][i, j].real)])
if wc[name][i, j].imag != 0 or not skip_zero:
# omit Im parts that have to vanish by symmetry
if (i, j) not in definitions.vanishing_im_parts[name]:
d[imblock]['values'].append([i+1, j+1, float(wc[name][i, j].imag)])
for name in definitions.WC_keys_4f:
reblock = 'WC'+name.upper()
imblock = 'IMWC'+name.upper()
if reblock not in d:
d[reblock] = defaultdict(list)
if imblock not in d:
d[imblock] = defaultdict(list)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
if (i, j, k, l) in definitions.redundant_elements[name] and skip_redundant:
# skip redundant elements
continue
if wc[name][i, j, k, l].real != 0 or not skip_zero:
d[reblock]['values'].append([i+1, j+1, k+1, l+1, float(wc[name][i, j, k, l].real)])
if wc[name][i, j, k, l].imag != 0 or not skip_zero:
# omit Im parts that have to vanish by symmetry
if (i, j, k, l) not in definitions.vanishing_im_parts[name]:
d[imblock]['values'].append([i+1, j+1, k+1, l+1, float(wc[name][i, j, k, l].imag)])
# remove empty blocks
empty = []
for block in d:
if d[block] == {}:
empty.append(block)
for block in empty:
del d[block]
return {'BLOCK': d} | Convert a dictionary of Wilson coefficients into
a dictionary that pylha can convert into a DSixTools WC output file. |
def dimension(self):
""" output dimension """
if self.dim > -1:
return self.dim
d = None
if self.dim != -1 and not self._estimated: # fixed parametrization
d = self.dim
elif self._estimated: # parametrization finished. Dimension is known
dim = len(self.eigenvalues)
if self.var_cutoff < 1.0: # if subspace_variance, reduce the output dimension if needed
dim = min(dim, np.searchsorted(self.cumvar, self.var_cutoff) + 1)
d = dim
elif self.var_cutoff == 1.0: # We only know that all dimensions are wanted, so return input dim
d = self.data_producer.dimension()
else: # We know nothing. Give up
raise RuntimeError('Requested dimension, but the dimension depends on the cumulative variance and the '
'transformer has not yet been estimated. Call estimate() before.')
return d | output dimension |
def getInspectorActionById(self, identifier):
""" Sets the inspector and draw the contents
Triggers the corresponding action so that it is checked in the menus.
"""
for action in self.inspectorActionGroup.actions():
if action.data() == identifier:
return action
raise KeyError("No action found with ID: {!r}".format(identifier)) | Sets the inspector and draw the contents
Triggers the corresponding action so that it is checked in the menus. |
def suspend(self, instance_id):
'''
Suspend a server
'''
nt_ks = self.compute_conn
response = nt_ks.servers.suspend(instance_id)
return True | Suspend a server |
def createIndex(self, table, fields, where = '', whereValues = []) :
"""Creates indexes for Raba Class a fields resulting in significantly faster SELECTs but potentially slower UPADTES/INSERTS and a bigger DBs
Fields can be a list of fields for Multi-Column Indices, or siply the name of one single field.
With the where close you can create a partial index by adding conditions
-----
only for sqlite 3.8.0+
where : optional ex: name = ? AND hair_color = ?
whereValues : optional, ex: ["britney", 'black']
"""
versioTest = sq.sqlite_version_info >= (3, 8, 0)
if len(where) > 0 and not versioTest :
#raise FutureWarning("Partial indexes (with the WHERE clause) were only implemented in sqlite 3.8.0+, your version is: %s. Sorry about that." % sq.sqlite_version)
sys.stderr.write("WARNING: IGNORING THE \"WHERE\" CLAUSE in INDEX. Partial indexes were only implemented in sqlite 3.8.0+, your version is: %s. Sorry about that.\n" % sq.sqlite_version)
indexTable = self.makeIndexTableName(table, fields)
else :
indexTable = self.makeIndexTableName(table, fields, where, whereValues)
if type(fields) is types.ListType :
sql = "CREATE INDEX IF NOT EXISTS %s on %s(%s)" %(indexTable, table, ', '.join(fields))
else :
sql = "CREATE INDEX IF NOT EXISTS %s on %s(%s)" %(indexTable, table, fields)
if len(where) > 0 and versioTest:
sql = "%s WHERE %s;" % (sql, where)
self.execute(sql, whereValues)
else :
self.execute(sql) | Creates indexes on Raba Class fields, resulting in significantly faster SELECTs but potentially slower UPDATES/INSERTS and a bigger DB.
Fields can be a list of fields for Multi-Column Indices, or simply the name of one single field.
With the where clause you can create a partial index by adding conditions.
-----
only for sqlite 3.8.0+
where : optional ex: name = ? AND hair_color = ?
whereValues : optional, ex: ["britney", 'black'] |
def run(self, dag):
"""Run one pass of the lookahead mapper on the provided DAG.
Args:
dag (DAGCircuit): the directed acyclic graph to be mapped
Returns:
DAGCircuit: A dag mapped to be compatible with the coupling_map in
the property_set.
Raises:
TranspilerError: if the coupling map or the layout are not
compatible with the DAG
"""
coupling_map = self._coupling_map
ordered_virtual_gates = list(dag.serial_layers())
if self.initial_layout is None:
if self.property_set["layout"]:
self.initial_layout = self.property_set["layout"]
else:
self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
if len(dag.qubits()) != len(self.initial_layout):
raise TranspilerError('The layout does not match the amount of qubits in the DAG')
if len(self._coupling_map.physical_qubits) != len(self.initial_layout):
raise TranspilerError(
"Mappers require to have the layout to be the same size as the coupling map")
mapped_gates = []
layout = self.initial_layout.copy()
gates_remaining = ordered_virtual_gates.copy()
while gates_remaining:
best_step = _search_forward_n_swaps(layout, gates_remaining,
coupling_map)
layout = best_step['layout']
gates_mapped = best_step['gates_mapped']
gates_remaining = best_step['gates_remaining']
mapped_gates.extend(gates_mapped)
# Preserve input DAG's name, regs, wire_map, etc. but replace the graph.
mapped_dag = _copy_circuit_metadata(dag, coupling_map)
for node in mapped_gates:
mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs)
return mapped_dag | Run one pass of the lookahead mapper on the provided DAG.
Args:
dag (DAGCircuit): the directed acyclic graph to be mapped
Returns:
DAGCircuit: A dag mapped to be compatible with the coupling_map in
the property_set.
Raises:
TranspilerError: if the coupling map or the layout are not
compatible with the DAG |
def configure(default=None, dev=None):
"""
The inner control loops for user interaction during quickstart
configuration.
"""
cache_loc = openaccess_epub.utils.cache_location()
config_loc = openaccess_epub.utils.config_location()
#Make the cache directory
openaccess_epub.utils.mkdir_p(cache_loc)
defaults = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc),
'input-relative-images': 'images-*',
'use-input-relative-images': 'y',
'image-cache': os.path.join(cache_loc, 'img_cache'),
'use-image-cache': 'n',
'use-image-fetching': 'y',
'default-output': '.',
'input-relative-css': '.',
'epubcheck-jarfile': os.path.join(cache_loc,
'epubcheck-3.0',
'epubcheck-3.0.jar')}
if default or dev: # Skip interactive and apply defaults
#Pass through the validation/modification steps
if dev: # The only current difference between dev and default
defaults['use-image-cache'] = 'y'
defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
defaults['image-cache'] = absolute_path(defaults['image-cache'])
defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
defaults['default-output'] = nonempty(defaults['default-output'])
defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])
config = config_formatter(CONFIG_TEXT, defaults)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('The config file has been written to {0}'.format(config_loc))
return
config_dict = {'now': time.asctime(),
'oae-version': openaccess_epub.__version__,
'cache-location': unix_path_coercion(cache_loc)}
print('''\nWelcome to the interactive configuration for OpenAccess_EPUB''')
print('''
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------\
''')
print('''
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:\n\n{0}
'''.format(cache_loc))
input('Press Enter to start...')
#Image Configuration
print('''
-- Configure Image Behavior --
When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):
Input-Relative: a path relative to the input file
Cached Images: locate the images in a cache
Fetched Online: attempts to download from the Internet (may fail)
We'll configure some values for each of these, and you\'ll also have the option
to turn them off.''')
#Input-relative image details
print('''
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.''')
user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:',
default=defaults['input-relative-images'], validator=list_opts)
print('''
Should OpenAccess_EPUB look for images relative to the input file by default?\
''')
user_prompt(config_dict, 'use-input-relative-images',
'Use input-relative images?: (Y/n)',
default=defaults['use-input-relative-images'],
validator=boolean)
#Image cache details
print('''
Where should OpenAccess_EPUB place the image cache?''')
user_prompt(config_dict, 'image-cache', 'Image cache?:',
default=defaults['image-cache'],
validator=absolute_path)
print('''
Should OpenAccess_EPUB use the image cache by default? This feature is intended
for developers and testers without local access to the image files and will
consume extra disk space for storage.''')
user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)',
default=defaults['use-image-cache'],
validator=boolean)
#Image fetching online details
print('''
Should OpenAccess_EPUB attempt to download the images from the Internet? This
is not supported for all publishers and not 100% guaranteed to succeed, you may
need to download them manually if this does not work.''')
user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)',
default=defaults['use-image-fetching'],
validator=boolean)
#Output configuration
print('''
-- Configure Output Behavior --
OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.
Where should OpenAccess_EPUB place the output ePub and log files? If you supply
a relative path, the output path will be relative to the input; if you supply
an absolute path, the output will always be placed there. The default behavior
is to place them in the same directory as the input.''')
user_prompt(config_dict, 'default-output', 'Output path?:',
default=defaults['default-output'],
validator=nonempty)
print('''
-- Configure CSS Behavior --
ePub files use CSS for improved styling, and ePub-readers must support a basic
subset of CSS functions. OpenAccess_EPUB provides a default CSS file, but a
manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.''')
user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:',
default=defaults['input-relative-css'],
validator=nonempty)
print('''
-- Configure EpubCheck --
EpubCheck is a program written and maintained by the IDPF as a tool to validate
ePub. In order to use it, your system must have Java installed and it is
recommended to use the latest version. Downloads of this program are found here:
https://github.com/IDPF/epubcheck/releases
Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.''')
user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:',
default=defaults['epubcheck-jarfile'], validator=absolute_path)
#Write the config.py file
config = config_formatter(CONFIG_TEXT, config_dict)
with open(config_loc, 'wb') as conf_out:
conf_out.write(bytes(config, 'UTF-8'))
print('''
Done configuring OpenAccess_EPUB!''') | The inner control loops for user interaction during quickstart
configuration. |
def _is_path(s):
"""Return whether an object is a path."""
if isinstance(s, string_types):
try:
return op.exists(s)
except (OSError, ValueError):
return False
else:
return False | Return whether an object is a path. |
def delete_message(self, chat_id, message_id):
"""
Use this method to delete message. Returns True on success.
:param chat_id: in which chat to delete
:param message_id: which message to delete
:return: API reply.
"""
return apihelper.delete_message(self.token, chat_id, message_id) | Use this method to delete message. Returns True on success.
:param chat_id: in which chat to delete
:param message_id: which message to delete
:return: API reply. |
def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError('Bot Cannot request information, check '
'api_key') | Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot. |
def xross_listener(http_method=None, **xross_attrs):
"""Instructs xross to handle AJAX calls right from the moment it is called.
This should be placed in a view decorated with `@xross_view()`.
:param str http_method: GET or POST. To be used as a source of data for xross.
:param dict xross_attrs: xross handler attributes.
Those attributes will be available in operation functions in `xross` keyword argument.
"""
handler = currentframe().f_back.f_locals['request']._xross_handler
handler.set_attrs(**xross_attrs)
if http_method is not None:
handler.http_method = http_method
handler.dispatch() | Instructs xross to handle AJAX calls right from the moment it is called.
This should be placed in a view decorated with `@xross_view()`.
:param str http_method: GET or POST. To be used as a source of data for xross.
:param dict xross_attrs: xross handler attributes.
Those attributes will be available in operation functions in `xross` keyword argument. |
def pvremove(devices, override=True):
'''
Remove a physical device being used as an LVM physical volume
override
Skip devices, if they are already not used as LVM physical volumes
CLI Examples:
.. code-block:: bash
salt mymachine lvm.pvremove /dev/sdb1,/dev/sdb2
'''
if isinstance(devices, six.string_types):
devices = devices.split(',')
cmd = ['pvremove', '-y']
for device in devices:
if pvdisplay(device):
cmd.append(device)
elif not override:
raise CommandExecutionError('{0} is not a physical volume'.format(device))
if not cmd[2:]:
# Nothing to do
return True
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out.get('retcode'):
raise CommandExecutionError(out.get('stderr'))
# Verify pvremove was successful
for device in devices:
if pvdisplay(device, quiet=True):
raise CommandExecutionError('Device "{0}" was not affected.'.format(device))
return True | Remove a physical device being used as an LVM physical volume
override
Skip devices, if they are already not used as LVM physical volumes
CLI Examples:
.. code-block:: bash
salt mymachine lvm.pvremove /dev/sdb1,/dev/sdb2 |
def set_tags(name=None,
tags=None,
call=None,
location=None,
instance_id=None,
resource_id=None,
kwargs=None): # pylint: disable=W0613
'''
Set tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff
'''
if kwargs is None:
kwargs = {}
if location is None:
location = get_location()
if instance_id is None:
if 'resource_id' in kwargs:
resource_id = kwargs['resource_id']
del kwargs['resource_id']
if 'instance_id' in kwargs:
instance_id = kwargs['instance_id']
del kwargs['instance_id']
if resource_id is None:
if instance_id is None:
instance_id = _get_node(name=name, instance_id=None, location=location)['instanceId']
else:
instance_id = resource_id
# This second check is a safety, in case the above still failed to produce
# a usable ID
if instance_id is None:
return {
'Error': 'A valid instance_id or resource_id was not specified.'
}
params = {'Action': 'CreateTags',
'ResourceId.1': instance_id}
log.debug('Tags to set for %s: %s', name, tags)
if kwargs and not tags:
tags = kwargs
for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)):
params['Tag.{0}.Key'.format(idx)] = tag_k
params['Tag.{0}.Value'.format(idx)] = tag_v
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
aws.query(params,
setname='tagSet',
location=location,
provider=get_provider(),
opts=__opts__,
sigver='4')
settags = get_tags(
instance_id=instance_id, call='action', location=location
)
log.debug('Setting the tags returned: %s', settags)
failed_to_set_tags = False
for tag in settags:
if tag['key'] not in tags:
# We were not setting this tag
continue
if tag.get('value') is None and tags.get(tag['key']) == '':
# This is a correctly set tag with no value
continue
if six.text_type(tags.get(tag['key'])) != six.text_type(tag['value']):
# Not set to the proper value!?
log.debug(
'Setting the tag %s returned %s instead of %s',
tag['key'], tags.get(tag['key']), tag['value']
)
failed_to_set_tags = True
break
if failed_to_set_tags:
log.warning('Failed to set tags. Remaining attempts %s', attempts)
attempts += 1
aws.sleep_exponential_backoff(attempts)
continue
return settags
raise SaltCloudSystemExit(
'Failed to set tags on {0}!'.format(name)
) | Set tags for a resource. Normally a VM name or instance_id is passed in,
but a resource_id may be passed instead. If both are passed in, the
instance_id will be used.
CLI Examples:
.. code-block:: bash
salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff'
salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff |
def AddRoute(self, short_name, long_name, route_type, route_id=None):
"""Add a route to this schedule.
Args:
short_name: Short name of the route, such as "71L"
long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd"
route_type: A type such as "Tram", "Subway" or "Bus"
route_id: id of the route or None, in which case a unique id is picked
Returns:
A new Route object
"""
if route_id is None:
route_id = util.FindUniqueId(self.routes)
route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name,
route_type=route_type, route_id=route_id)
route.agency_id = self.GetDefaultAgency().agency_id
self.AddRouteObject(route)
return route | Add a route to this schedule.
Args:
short_name: Short name of the route, such as "71L"
long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd"
route_type: A type such as "Tram", "Subway" or "Bus"
route_id: id of the route or None, in which case a unique id is picked
Returns:
A new Route object |
def get_lifecycle(self, policy=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy
"""
return self.transport.perform_request(
"GET", _make_path("_ilm", "policy", policy), params=params
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy |
def calc_size_and_sha265(content: io.IOBase, chunk_size: int):
"""Calculates the size and the sha2566 value of the content."""
size = 0
sha256 = hashlib.sha256()
content.seek(0, io.SEEK_SET)
while True:
buf = content.read(chunk_size)
length = len(buf)
size += length
sha256.update(buf)
if length != chunk_size:
break
return size, sha256.hexdigest() | Calculates the size and the sha256 value of the content. |
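A quick check of the helper above using an in-memory stream (the payload and chunk size are arbitrary):

import hashlib
import io

payload = b"hello world" * 1000
size, digest = calc_size_and_sha265(io.BytesIO(payload), chunk_size=4096)
assert size == len(payload)
assert digest == hashlib.sha256(payload).hexdigest()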
def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
"""Imports *vpn_path* to qualifying VRF tables.
Import RTs of the VRF table are matched with RTs from *vpn_path* and if we
have any common RTs we import the path into VRF.
"""
LOG.debug('Importing path %s to qualifying VRFs', vpn_path)
# If this path has no RTs we are done.
if not path_rts:
LOG.info('Encountered a path with no RTs: %s', vpn_path)
return
# We match path RTs with all VRFs that are interested in them.
interested_tables = set()
# Get route family of VRF to when this VPN Path can be imported to
if vpn_path.route_family == RF_IPv4_VPN:
route_family = RF_IPv4_UC
elif vpn_path.route_family == RF_IPv6_VPN:
route_family = RF_IPv6_UC
elif vpn_path.route_family == RF_L2_EVPN:
route_family = RF_L2_EVPN
elif vpn_path.route_family == RF_VPNv4_FLOWSPEC:
route_family = RF_IPv4_FLOWSPEC
elif vpn_path.route_family == RF_VPNv6_FLOWSPEC:
route_family = RF_IPv6_FLOWSPEC
elif vpn_path.route_family == RF_L2VPN_FLOWSPEC:
route_family = RF_L2VPN_FLOWSPEC
else:
raise ValueError('Unsupported route family for VRF: %s' %
vpn_path.route_family)
for rt in path_rts:
rt_rf_id = rt + ':' + str(route_family)
vrf_rt_tables = self._tables_for_rt.get(rt_rf_id)
if vrf_rt_tables:
interested_tables.update(vrf_rt_tables)
if interested_tables:
# We iterate over all VRF tables that are interested in the RT
# of the given path and import this path into them.
route_dist = vpn_path.nlri.route_dist
for vrf_table in interested_tables:
if (vpn_path.source is not None or
route_dist != vrf_table.vrf_conf.route_dist):
update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
# Queue the destination for further processing.
if update_vrf_dest is not None:
self._signal_bus.\
dest_changed(update_vrf_dest)
else:
# If we do not have any VRF with import RT that match with path RT
LOG.debug('No VRF table found that imports RTs: %s', path_rts) | Imports *vpn_path* to qualifying VRF tables.
Import RTs of the VRF table are matched with RTs from *vpn_path* and if we
have any common RTs we import the path into VRF. |
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems | Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing. |
def zoom_in(self, action=None, channel=0):
"""
Params:
action - start or stop
channel - channel number
The magic of zoom in 1x, 2x etc. is the timer between the cmd
'start' and cmd 'stop'. My suggestion for start/stop cmd is 0.5 sec
"""
ret = self.command(
'ptz.cgi?action={0}&channel={1}&code=ZoomTele&arg1=0'
'&arg2=0&arg3=0'.format(action, channel)
)
return ret.content.decode('utf-8') | Params:
action - start or stop
channel - channel number
The magic of zoom in 1x, 2x etc. is the timer between the cmd
'start' and cmd 'stop'. My suggestion for start/stop cmd is 0.5 sec |
def weighted_std(values, weights):
""" Calculate standard deviation weighted by errors """
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights)
return np.sqrt(variance) | Calculate standard deviation weighted by errors |
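A small numerical check of weighted_std; with equal weights it reduces to the ordinary (population) standard deviation:

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
assert np.isclose(weighted_std(values, np.ones_like(values)), values.std())
# Heavier weights pull the average, and hence the spread, toward those points:
print(weighted_std(values, np.array([10.0, 1.0, 1.0, 1.0])))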
def cache_result(func):
"""
Decorator to cache the result of functions that take a ``user`` and a
``size`` value.
"""
def cache_set(key, value):
cache.set(key, value, AVATAR_CACHE_TIMEOUT)
return value
def cached_func(user, size):
prefix = func.__name__
cached_funcs.add(prefix)
key = get_cache_key(user, size, prefix=prefix)
return cache.get(key) or cache_set(key, func(user, size))
return cached_func | Decorator to cache the result of functions that take a ``user`` and a
``size`` value. |
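A hedged usage sketch: the decorator is meant to wrap avatar-style helpers that take a user and a size. The wrapped function below is hypothetical; cache, get_cache_key and AVATAR_CACHE_TIMEOUT come from the surrounding module:

@cache_result
def avatar_url(user, size):
    # expensive lookup or rendering happens here (hypothetical example)
    return "/avatars/{0}/{1}.png".format(user.pk, size)

# The first call computes and stores the value; later calls with the same
# user/size pair are served from the cache until AVATAR_CACHE_TIMEOUT expires.
# url = avatar_url(request.user, 80)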
def _convert_to_cwl_json(data, fnargs, input_files):
"""Convert world data object (or list of data objects) into outputs for CWL ingestion.
"""
out = {}
for outvar in _get_output_cwl_keys(fnargs):
keys = []
for key in outvar.split("__"):
try:
key = int(key)
except ValueError:
pass
keys.append(key)
if isinstance(data, dict):
out[outvar] = _to_cwl(tz.get_in(keys, data), input_files)
else:
out[outvar] = [_to_cwl(tz.get_in(keys, x), input_files) for x in data]
return out | Convert world data object (or list of data objects) into outputs for CWL ingestion. |
def zoning_defined_configuration_cfg_cfg_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
zoning = ET.SubElement(config, "zoning", xmlns="urn:brocade.com:mgmt:brocade-zone")
defined_configuration = ET.SubElement(zoning, "defined-configuration")
cfg = ET.SubElement(defined_configuration, "cfg")
cfg_name = ET.SubElement(cfg, "cfg-name")
cfg_name.text = kwargs.pop('cfg_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_humidity(self):
"""
Returns the percentage of relative humidity
"""
self._init_humidity() # Ensure humidity sensor is initialised
humidity = 0
data = self._humidity.humidityRead()
if (data[0]): # Humidity valid
humidity = data[1]
return humidity | Returns the percentage of relative humidity |
def add_local_option(self, *args, **kw):
"""
Adds a local option to the parser.
This is initiated by a SetOption() call to add a user-defined
command-line option. We add the option to a separate option
group for the local options, creating the group if necessary.
"""
try:
group = self.local_option_group
except AttributeError:
group = SConsOptionGroup(self, 'Local Options')
group = self.add_option_group(group)
self.local_option_group = group
result = group.add_option(*args, **kw)
if result:
# The option was added successfully. We now have to add the
# default value to our object that holds the default values
# (so that an attempt to fetch the option's attribute will
# yield the default value when not overridden) and then
# we re-parse the leftover command-line options, so that
# any value overridden on the command line is immediately
# available if the user turns around and does a GetOption()
# right away.
setattr(self.values.__defaults__, result.dest, result.default)
self.reparse_local_options()
return result | Adds a local option to the parser.
This is initiated by a SetOption() call to add a user-defined
command-line option. We add the option to a separate option
group for the local options, creating the group if necessary. |
def total_variation(domain, grad=None):
"""Total variation functional.
Parameters
----------
domain : odlspace
domain of TV functional
grad : gradient operator, optional
Gradient operator of the total variation functional. This may be any
linear operator and thereby generalizing TV. default=forward
differences with Neumann boundary conditions
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tv = spdhg.total_variation(space)
>>> x = space.one()
>>> tv(x) < 1e-10
"""
if grad is None:
grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
else:
grad = grad
f = odl.solvers.GroupL1Norm(grad.range, exponent=2)
return f * grad | Total variation functional.
Parameters
----------
domain : odlspace
domain of TV functional
grad : gradient operator, optional
Gradient operator of the total variation functional. This may be any
linear operator and thereby generalizing TV. default=forward
differences with Neumann boundary conditions
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tv = spdhg.total_variation(space)
>>> x = space.one()
>>> tv(x) < 1e-10 |
def cached_property(getter):
"""
Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties.
"""
def decorator(self):
key = "_cached_property_" + getter.__name__
if not hasattr(self, key):
setattr(self, key, getter(self))
return getattr(self, key)
decorator.__name__ = getter.__name__
decorator.__module__ = getter.__module__
decorator.__doc__ = getter.__doc__
return property(decorator) | Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties. |
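A minimal usage sketch of cached_property; the class and the "expensive" computation are made up for illustration:

class Circle:
    def __init__(self, radius):
        self.radius = radius

    @cached_property
    def area(self):
        print("computing area once")
        return 3.141592653589793 * self.radius ** 2

c = Circle(2.0)
first = c.area    # prints "computing area once" and stores the result
second = c.area   # served from the _cached_property_area attribute, no recomputation
assert first == second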
def is_compliant(self, path):
"""
Given a set of content matching cases i.e. tuple(regex, bool) where
bool value denotes whether or not regex is expected to match, check that
all cases match as expected with the contents of the file. Cases can be
expected to pass or fail.
:param path: Path of file to check.
:returns: Boolean value representing whether or not all cases are
found to be compliant.
"""
log("Auditing contents of file '%s'" % (path), level=DEBUG)
with open(path, 'r') as fd:
contents = fd.read()
matches = 0
for pattern in self.pass_cases:
key = re.compile(pattern, flags=re.MULTILINE)
results = re.search(key, contents)
if results:
matches += 1
else:
log("Pattern '%s' was expected to pass but instead it failed"
% (pattern), level=WARNING)
for pattern in self.fail_cases:
key = re.compile(pattern, flags=re.MULTILINE)
results = re.search(key, contents)
if not results:
matches += 1
else:
log("Pattern '%s' was expected to fail but instead it passed"
% (pattern), level=WARNING)
total = len(self.pass_cases) + len(self.fail_cases)
log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
return matches == total | Given a set of content matching cases i.e. tuple(regex, bool) where
bool value denotes whether or not regex is expected to match, check that
all cases match as expected with the contents of the file. Cases can be
expected to pass or fail.
:param path: Path of file to check.
:returns: Boolean value representing whether or not all cases are
found to be compliant. |
def pauseMovie(self):
"""Pause button handler."""
if self.state == self.PLAYING:
self.sendRtspRequest(self.PAUSE) | Pause button handler. |
def _fault_to_exception(f):
""" Converts XML-RPC Fault objects to Pynipap-exceptions.
TODO: Is this one necessary? Can be done inline...
"""
e = _fault_to_exception_map.get(f.faultCode)
if e is None:
e = NipapError
return e(f.faultString) | Converts XML-RPC Fault objects to Pynipap-exceptions.
TODO: Is this one necessary? Can be done inline... |
def override_cluster_spec(self, srd):
"""
Returns SystemRequirementsDict can be passed in a "systemRequirements"
input to app-xxx/run, e.g. {'fn': {'clusterSpec': {initialInstanceCount: 3, version: "2.4.0", ..}}}
Since full clusterSpec must be passed to the API server, we need to retrieve the cluster
spec defined in app doc's systemRequirements and overwrite the field initialInstanceCount
with the value the user passed to dx run for each entrypoint.
initialInstanceCount is currently the only clusterSpec's field the user is allowed to change
at runtime.
A few scenarios when requesting instance count for different entrypoints with dx run
and the resulting merged systemRequirements (merged_cluster_spec). The bootstrapScript
field here is only one of many (version, ports, etc) that should be copied from app
spec to merged_cluster_spec:
Requested: {"*": 5}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "y.sh"}}
Requested: {"*": 15}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"},
"*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"},
"*": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"}}
Requested: {"main": 12}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 12, bootstrapScript: "x.sh"}}
Requested: {"main": 33}
App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "z.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 33, bootstrapScript: "z.sh"}}
Requested: {"main": 22, "*": 11}
App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "t.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 22, bootstrapScript: "t.sh"},
"*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "t.sh"}}
"""
merged_cluster_spec = copy.deepcopy(self.entrypoints)
# Remove entrypoints without "clusterSpec"
merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if v.get("clusterSpec") is not None])
# Remove entrypoints not provided in requested instance counts
merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if \
k in srd.entrypoints or "*" in srd.entrypoints])
# Overwrite values of self.entrypoints.clusterSpec with the ones from srd
# Named entrypoint takes precedence over the wildcard
for entry_pt, req in merged_cluster_spec.items():
merged_cluster_spec[entry_pt]["clusterSpec"].update(
srd.entrypoints.get(entry_pt, srd.entrypoints.get("*"))["clusterSpec"])
# Check if all entrypoints in srd are included in merged_cluster_spec
# (if a named entrypoint was used in srd and such an entrypoint doesn't exist
# in app sys req, we need to take the cluster spec from the app's "*", if it exists)
for entry_pt, req in srd.entrypoints.items():
if entry_pt not in merged_cluster_spec and "*" in self.entrypoints and "clusterSpec" in self.entrypoints["*"]:
merged_cluster_spec[entry_pt] = {"clusterSpec": copy.deepcopy(self.entrypoints["*"]["clusterSpec"])}
merged_cluster_spec[entry_pt]["clusterSpec"].update(req["clusterSpec"])
return SystemRequirementsDict(merged_cluster_spec) | Returns SystemRequirementsDict can be passed in a "systemRequirements"
input to app-xxx/run, e.g. {'fn': {'clusterSpec': {initialInstanceCount: 3, version: "2.4.0", ..}}}
Since full clusterSpec must be passed to the API server, we need to retrieve the cluster
spec defined in app doc's systemRequirements and overwrite the field initialInstanceCount
with the value the user passed to dx run for each entrypoint.
initialInstanceCount is currently the only clusterSpec's field the user is allowed to change
at runtime.
A few scenarios when requesting instance count for different entrypoints with dx run
and the resulting merged systemRequirements (merged_cluster_spec). The bootstrapScript
field here is only one of many (version, ports, etc) that should be copied from app
spec to merged_cluster_spec:
Requested: {"*": 5}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "y.sh"}}
Requested: {"*": 15}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"},
"*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"},
"*": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"}}
Requested: {"main": 12}
App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"},
"other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 12, bootstrapScript: "x.sh"}}
Requested: {"main": 33}
App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "z.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 33, bootstrapScript: "z.sh"}}
Requested: {"main": 22, "*": 11}
App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "t.sh"}}
Merged: {"main": "clusterSpec": {"initialInstanceCount": 22, bootstrapScript: "t.sh"},
"*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "t.sh"}} |
def install_theme(path_to_theme):
"""
Pass a path to a theme file which will be extracted to the themes directory.
"""
pref_init()
# cp the file
filename = basename(path_to_theme)
dest = join(THEMES_DIR, filename)
copy(path_to_theme, dest)
# unzip
zf = zipfile.ZipFile(dest)
# should make sure zipfile contains only themename folder which doesn't conflict
# with existing themename. Or some kind of sanity check
zf.extractall(THEMES_DIR) # plus this is a potential security flaw pre 2.7.4
# remove the copied zipfile
unlink(dest) | Pass a path to a theme file which will be extracted to the themes directory. |
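A possible sanity check before extractall, in the spirit of the comment above (a sketch only; safe_extract is a hypothetical helper, and the destination directory would be the snippet's THEMES_DIR):
import os
import zipfile

def safe_extract(zf, dest_dir):
    # Reject members whose resolved path would escape dest_dir (absolute paths, "..", etc.)
    dest_root = os.path.realpath(dest_dir)
    for member in zf.namelist():
        target = os.path.realpath(os.path.join(dest_root, member))
        if target != dest_root and not target.startswith(dest_root + os.sep):
            raise ValueError("Unsafe path in theme archive: %s" % member)
    zf.extractall(dest_root)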
def marketShortInterest(date=None, token='', version=''):
'''The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
date (datetime); Effective Datetime
token (string); Access token
version (string); API version
Returns:
dict: result
'''
if date:
date = _strOrDate(date)
return _getJson('stock/market/short-interest/' + date, token, version)
return _getJson('stock/market/short-interest', token, version) | The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
date (datetime); Effective Datetime
token (string); Access token
version (string); API version
Returns:
dict: result |
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
return parser.add_argument_group(*self.args, **self.kwds) | Add this object's information to the parser. |
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis}) | Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise. |
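Expected behaviour, shown with plain pandas for reference (this implementation mirrors it, but raises NotImplementedError for level and inplace):
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3]}, index=["b", "c", "a"])
print(df.sort_index())                  # rows ordered a, b, c
print(df.sort_index(ascending=False))   # rows ordered c, b, a
print(df.sort_index(axis=1))            # sorts column labels instead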
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if self._file_object_set_in_init:
return
self._file_object = self._OpenFileObject(path_spec)
if not self._file_object:
raise IOError('Unable to open missing file-like object.') | Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. |
def batch_scan(points, xdist=20, ydist=20, N=5):
"""
runs synteny_scan() per chromosome pair
"""
chr_pair_points = group_hits(points)
clusters = []
for chr_pair in sorted(chr_pair_points.keys()):
points = chr_pair_points[chr_pair]
clusters.extend(synteny_scan(points, xdist, ydist, N))
return clusters | runs synteny_scan() per chromosome pair |
def get_install_requires():
"""Add conditional dependencies (when creating source distributions)."""
install_requires = get_requirements('requirements.txt')
if 'bdist_wheel' not in sys.argv:
if sys.version_info[0] == 2:
# On Python 2.6 and 2.7 we pull in Bazaar.
install_requires.append('bzr >= 2.6.0')
if sys.version_info[:2] == (2, 6):
# On Python 2.6 we have to stick to versions of Mercurial below 4.3
# because 4.3 drops support for Python 2.6, see the change log:
# https://www.mercurial-scm.org/wiki/WhatsNew
install_requires.append('mercurial >= 2.9, < 4.3')
elif (2, 6) < sys.version_info[:2] < (3, 0):
# On Python 2.7 we pull in Mercurial.
install_requires.append('mercurial >= 2.9')
return sorted(install_requires) | Add conditional dependencies (when creating source distributions). |
def _show(self, pk):
"""
show function logic, override to implement different logic
returns show and related list widget
"""
pages = get_page_args()
page_sizes = get_page_size_args()
orders = get_order_args()
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
widgets = self._get_show_widget(pk, item)
self.update_redirect()
return self._get_related_views_widgets(
item, orders=orders, pages=pages, page_sizes=page_sizes, widgets=widgets
) | show function logic, override to implement different logic
returns show and related list widget |
def close(self, code=None, reason=''):
"""
Close the socket by sending a CLOSE frame and waiting for a response
close message, unless such a message has already been received earlier
(prior to calling this function, for example). The onclose() handler is
called after the response has been received, but before the socket is
actually closed.
"""
self.send_close_frame(code, reason)
frame = self.sock.recv()
if frame.opcode != OPCODE_CLOSE:
raise ValueError('expected CLOSE frame, got %s' % frame)
self.handle_control_frame(frame) | Close the socket by sending a CLOSE frame and waiting for a response
close message, unless such a message has already been received earlier
(prior to calling this function, for example). The onclose() handler is
called after the response has been received, but before the socket is
actually closed. |
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int) |
def render(self, code):
"""Renders the barcode to whatever the inheriting writer provides,
using the registered callbacks.
:parameters:
code : List
List of strings matching the writer spec
(only contain 0 or 1).
"""
if self._callbacks['initialize'] is not None:
self._callbacks['initialize'](code)
ypos = 1.0
for line in code:
# Left quiet zone is x startposition
xpos = self.quiet_zone
for mod in line:
if mod == '0':
color = self.background
else:
color = self.foreground
self._callbacks['paint_module'](xpos, ypos, self.module_width,
color)
xpos += self.module_width
# Add right quiet zone to every line
self._callbacks['paint_module'](xpos, ypos, self.quiet_zone,
self.background)
ypos += self.module_height
if self.text and self._callbacks['paint_text'] is not None:
ypos += self.text_distance
if self.center_text:
xpos = xpos / 2.0
else:
xpos = self.quiet_zone + 4.0
self._callbacks['paint_text'](xpos, ypos)
return self._callbacks['finish']() | Renders the barcode to whatever the inheriting writer provides,
using the registered callbacks.
:parameters:
code : List
List of strings matching the writer spec
(only contain 0 or 1). |
def expand_as_args(args):
"""Returns `True` if `args` should be expanded as `*args`."""
return (isinstance(args, collections.Sequence) and
not _is_namedtuple(args) and not _force_leaf(args)) | Returns `True` if `args` should be expanded as `*args`. |
def warn_import_error(type_of_obj_support: str, caught: ImportError):
"""
Utility method to print a warning message about failed import of some modules
:param type_of_obj_support:
:param caught:
:return:
"""
msg = StringIO()
msg.writelines('Import Error while trying to add support for ' + type_of_obj_support + '. You may continue but '
"the associated parsers and converters won't be available: \n")
traceback.print_tb(caught.__traceback__, file=msg)
msg.writelines(str(caught.__class__.__name__) + ' : ' + str(caught) + '\n')
warn(msg.getvalue()) | Utility method to print a warning message about failed import of some modules
:param type_of_obj_support:
:param caught:
:return: |
def interface_by_name(self, name):
'''
Given a device name, return the corresponding interface object
'''
if name in self._devinfo:
return self._devinfo[name]
raise KeyError("No device named {}".format(name)) | Given a device name, return the corresponding interface object |
def substitute_harmonic(progression, substitute_index, ignore_suffix=False):
"""Do simple harmonic substitutions. Return a list of possible substitions
for progression[substitute_index].
If ignore_suffix is set to True the suffix of the chord being
substituted will be ignored. Otherwise only progressions without a
suffix, or with suffix '7' will be substituted.
The following table is used to convert progressions:
|| I || III ||
|| I || VI ||
|| IV || II ||
|| IV || VI ||
|| V || VII ||
"""
simple_substitutions = [('I', 'III'), ('I', 'VI'), ('IV', 'II'),
('IV', 'VI'), ('V', 'VII')]
res = []
(roman, acc, suff) = parse_string(progression[substitute_index])
if suff == '' or suff == '7' or ignore_suffix:
for subs in simple_substitutions:
r = subs[1] if roman == subs[0] else None
if r is None:
r = subs[0] if roman == subs[1] else None
if r is not None:
suff = suff if suff == '7' else ''
res.append(tuple_to_string((r, acc, suff)))
return res | Do simple harmonic substitutions. Return a list of possible substitutions
for progression[substitute_index].
If ignore_suffix is set to True the suffix of the chord being
substituted will be ignored. Otherwise only progressions without a
suffix, or with suffix '7' will be substituted.
The following table is used to convert progressions:
|| I || III ||
|| I || VI ||
|| IV || II ||
|| IV || VI ||
|| V || VII || |
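A usage sketch, assuming the surrounding progressions module (with parse_string and tuple_to_string) is importable; the expected outputs follow from the table above:
# 'I' at index 0 can be replaced by 'III' or 'VI'
print(substitute_harmonic(['I', 'IV', 'V'], 0))    # ['III', 'VI']
# the '7' suffix is kept on substitutions
print(substitute_harmonic(['I', 'IV7', 'V'], 1))   # ['II7', 'VI7']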
def free_size(self, units="MiB"):
"""
Returns the volume group free size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_vg_get_free_size(self.handle)
self.close()
return size_convert(size, units) | Returns the volume group free size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. |
def length(self, vertices):
"""
Return the total length of the entity.
Returns
---------
length: float, total length of entity
"""
length = ((np.diff(self.discrete(vertices),
axis=0)**2).sum(axis=1)**.5).sum()
return length | Return the total length of the entity.
Returns
---------
length: float, total length of entity |
def create_vm(self, userid, cpu, memory, disk_list, profile,
max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam):
""" Create VM and add disks if specified. """
rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
'--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
'--maxMemSize %(max_mem)s --setReservedMem' %
{'uid': userid, 'mem': memory,
'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
'cpu': cpu, 'prof': profile,
'max_cpu': max_cpu, 'max_mem': max_mem})
if CONF.zvm.default_admin_userid:
rd += (' --logonby "%s"' % CONF.zvm.default_admin_userid)
if (disk_list and 'is_boot_disk' in disk_list[0] and
disk_list[0]['is_boot_disk']):
# we assume at least one disk exist, which means, is_boot_disk
# is true for exactly one disk.
rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
# load param for ipl
if ipl_param:
rd += ' --iplParam %s' % ipl_param
if ipl_loadparam:
rd += ' --iplLoadparam %s' % ipl_loadparam
action = "create userid '%s'" % userid
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
result = "Profile '%s'" % profile
raise exception.SDKObjectNotExistError(obj_desc=result,
modID='guest')
else:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
# Add the guest to db immediately after user created
action = "add guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.add_guest(userid)
# Continue to add disk
if disk_list:
# Add disks for vm
return self.add_mdisks(userid, disk_list) | Create VM and add disks if specified. |
def copy_script(self, filename, id_=-1):
"""Copy a script to all repositories.
Takes into account whether a JSS has been migrated. See the
individual DistributionPoint types for more information.
Args:
filename: String path to the local file to copy.
id_: Integer ID you wish to associate script with for a JDS
or CDP only. Default is -1, which is used for creating
a new script object in the database.
"""
for repo in self._children:
repo.copy_script(filename, id_) | Copy a script to all repositories.
Takes into account whether a JSS has been migrated. See the
individual DistributionPoint types for more information.
Args:
filename: String path to the local file to copy.
id_: Integer ID you wish to associate script with for a JDS
or CDP only. Default is -1, which is used for creating
a new script object in the database. |
def _parse_reflectivity(line, lines):
"""Parse Energy [eV] reflect_xx reflect_zz"""
split_line = line.split()
energy = float(split_line[0])
reflect_xx = float(split_line[1])
reflect_zz = float(split_line[2])
return {"energy": energy, "reflect_xx": reflect_xx, "reflect_zz": reflect_zz} | Parse Energy [eV] reflect_xx reflect_zz |
def load_yaml(file):
"""If pyyaml > 5.1 use full_load to avoid warning"""
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file) | If pyyaml > 5.1 use full_load to avoid warning |
def _tokenize(cls, sentence):
"""
Split a sentence while preserving tags.
"""
while True:
match = cls._regex_tag.search(sentence)
if not match:
yield from cls._split(sentence)
return
chunk = sentence[:match.start()]
yield from cls._split(chunk)
tag = match.group(0)
yield tag
sentence = sentence[(len(chunk) + len(tag)):] | Split a sentence while preserving tags. |
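A self-contained sketch of the same idea; the real class supplies _regex_tag and _split, so the tag regex and whitespace split below are stand-ins:
import re

_regex_tag = re.compile(r"<[^>]+>")

def tokenize(sentence):
    while True:
        match = _regex_tag.search(sentence)
        if not match:
            yield from sentence.split()
            return
        yield from sentence[:match.start()].split()
        yield match.group(0)
        sentence = sentence[match.end():]

print(list(tokenize("hello <b>big</b> world")))
# ['hello', '<b>', 'big', '</b>', 'world']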
def get_items(self):
"""
Return the item models associated with this Publish group.
"""
from .layers import Layer
# no expansion support, just URLs
results = []
for url in self.items:
if '/layers/' in url:
r = self._client.request('GET', url)
results.append(self._client.get_manager(Layer).create_from_result(r.json()))
else:
raise NotImplementedError("No support for %s" % url)
return results | Return the item models associated with this Publish group. |
def load_from_ini(ini, default_section=_DEFAULT_SECTION):
"""
Read configuration from a single ini file
:param ini:
:param default_section:
:return:
"""
global _CONFIG_CACHE
if ini not in _CONFIG_CACHE:
if six.PY3:
logger.debug("PY3........")
_CONFIG_CACHE[ini] = _load_from_ini_py3(ini, default_section)
else:
_CONFIG_CACHE[ini] = _load_from_ini_py2(ini)
logger.debug(_CONFIG_CACHE[ini])
return _CONFIG_CACHE[ini] | Read configuration from a single ini file
:param ini:
:param default_section:
:return: |
def walk(prev, inital_path, *args, **kw):
"""This pipe wrap os.walk and yield absolute path one by one.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param args: The end-of-line symbol for each output.
:type args: list of string.
:param kw: The end-of-line symbol for each output.
:type kw: dictionary of options. Add 'endl' in kw to specify end-of-line symbol.
:returns: generator
"""
for dir_path, dir_names, filenames in os.walk(inital_path):
for filename in filenames:
yield os.path.join(dir_path, filename) | This pipe wraps os.walk and yields absolute paths one by one.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param args: The end-of-line symbol for each output.
:type args: list of string.
:param kw: The end-of-line symbol for each output.
:type kw: dictionary of options. Add 'endl' in kw to specify end-of-line symbol.
:returns: generator |
def get_signature(self, signature):
"""Retrieve one signature, discriminated by name or id.
Note that signature name is not case sensitive.
:param signature: a zobjects.Signature describing the signature,
like "Signature(name='my-sig')"
:returns: a zobjects.Signature object filled with the signature, or
None if no signature matches.
"""
resp = self.request_list('GetSignatures')
# GetSignature does not allow to filter the results, so we do it by
# hand...
if resp and (len(resp) > 0):
for sig_dict in resp:
sig = zobjects.Signature.from_dict(sig_dict)
if hasattr(signature, 'id'):
its_this_one = (sig.id == signature.id)
elif hasattr(signature, 'name'):
its_this_one = (sig.name.upper() == signature.name.upper())
else:
raise ValueError('should mention one of id,name')
if its_this_one:
return sig
else:
return None | Retrieve one signature, discriminated by name or id.
Note that signature name is not case sensitive.
:param signature: a zobjects.Signature describing the signature,
like "Signature(name='my-sig')"
:returns: a zobjects.Signature object filled with the signature, or
None if no signature matches. |
def get_cookbook_dirs(self, base_dir=None):
"""Find cookbook directories."""
if base_dir is None:
base_dir = self.env_root
cookbook_dirs = []
dirs_to_skip = set(['.git'])
for root, dirs, files in os.walk(base_dir): # pylint: disable=W0612
dirs[:] = [d for d in dirs if d not in dirs_to_skip]
for name in files:
if name == 'metadata.rb':
if 'cookbook' in os.path.basename(os.path.dirname(root)):
cookbook_dirs.append(root)
return cookbook_dirs | Find cookbook directories. |
def get_last_fingerprint(fullpath):
""" Get the last known modification time for a file """
record = model.FileFingerprint.get(file_path=fullpath)
if record:
return record.fingerprint
return None | Get the last known modification time for a file |
def p_lpartselect_lpointer(self, p):
'lpartselect : pointer LBRACKET expression COLON expression RBRACKET'
p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | lpartselect : pointer LBRACKET expression COLON expression RBRACKET |
def file_digest(source):
"""Calculates SHA256 digest of a file.
Args:
source: either a file-like object or a path to file
"""
hash_sha256 = hashlib.sha256()
should_close = False
if isinstance(source, six.string_types):
should_close = True
source = open(source, 'rb')
for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):
hash_sha256.update(chunk)
if should_close:
source.close()
return hash_sha256.hexdigest() | Calculates SHA256 digest of a file.
Args:
source: either a file-like object or a path to file |
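Usage sketch: both a path and a file-like object are accepted, and the result matches hashlib directly:
import hashlib
import io

assert file_digest(io.BytesIO(b"hello")) == hashlib.sha256(b"hello").hexdigest()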
def load_featured(data):
"""Load community featuring from data dump.
:param data: Dictionary containing community featuring data.
:type data: dict
"""
from invenio_communities.models import FeaturedCommunity
obj = FeaturedCommunity(id=data['id'],
id_community=data['id_community'],
start_date=iso2dt(data['start_date']))
db.session.add(obj)
db.session.commit() | Load community featuring from data dump.
:param data: Dictionary containing community featuring data.
:type data: dict |
def printImports(self):
"""Produce a report of dependencies."""
for module in self.listModules():
print("%s:" % module.label)
if self.external_dependencies:
imports = list(module.imports)
else:
imports = [modname for modname in module.imports
if modname in self.modules]
imports.sort()
print(" %s" % "\n ".join(imports)) | Produce a report of dependencies. |
def _parse_datapoints(self, parsed_duration, parsed_resolution, limit):
"""
Parse the number of datapoints of a query.
This can be calculated from the given duration and resolution of the query.
E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds
then the number of datapoints would be 7200/10 => 720 datapoints.
:param parsed_duration:
:param parsed_resolution:
:param limit:
:return:
"""
return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit) | Parse the number of datapoints of a query.
This can be calculated from the given duration and resolution of the query.
E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds
then the number of datapoints would be 7200/10 => 720 datapoints.
:param parsed_duration:
:param parsed_resolution:
:param limit:
:return: |
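The relationship in isolation, as a toy sketch (how the real datapoints_parser applies the limit is an assumption here):
def datapoints(duration_seconds, resolution_seconds, limit=None):
    n = duration_seconds // resolution_seconds
    return min(n, limit) if limit is not None else n

print(datapoints(2 * 60 * 60, 10))        # 720
print(datapoints(2 * 60 * 60, 10, 500))   # 500, capped by the limit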
def dump_sensor_memory(self, cb_compress=False, custom_compress=False, custom_compress_file=None, auto_collect_result=False):
"""Customized function for dumping sensor memory.
:arguments cb_compress: If True, use CarbonBlack's built-in compression.
:arguments custom_compress: If True, compress with the lr_tools compress_file.bat helper instead.
:arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression
:arguments auto_collect_result: If True, wait for the memdump and compression to complete, then use cbapi to collect the file
"""
print("~ dumping contents of memory on {}".format(self.sensor.computer_name))
local_file = remote_file = "{}.memdmp".format(self.sensor.computer_name)
if not self.lr_session:
self.go_live()
try:
if cb_compress and auto_collect_result:
logging.info("CB compression and auto-collection set")
self.lr_session.memdump(remote_filename=remote_file, compress=cb_compress)
return True
dump_object = self.lr_session.start_memdump(remote_filename=remote_file, compress=cb_compress)
dump_object.wait()
if cb_compress:
print("+ Memory dump compressed at -> C:\windows\carbonblack\{}.zip".format(remote_file))
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}.zip".format(remote_file))
return True
print("+ Memory dump complete on host -> C:\windows\carbonblack\{}".format(remote_file))
except LiveResponseError as e:
raise Exception("LiveResponseError: {}".format(e))
if custom_compress: # compress with powershell?
if not os.path.exists(custom_compress_file):
logging.debug("{} not found.".format(custom_compress_file))
HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),'..','..'))
custom_compress_file = os.path.join(HOME_DIR, 'lr_tools', 'compress_file.bat')
if not os.path.exists(custom_compress_file):
logging.error("{} not found.".format(custom_compress_file))
return False
logging.info("Using {}".format(custom_compress_file))
bat_filename = custom_compress_file[custom_compress_file.rfind('/')+1:]
filedata = None
with open(custom_compress_file, 'rb') as f:
filedata = f.read()
try:
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
except LiveResponseError as e:
if 'ERROR_FILE_EXISTS' not in str(e):
logging.error("Error puting compress_file.bat")
return False
else:
self.lr_session.delete_file("C:\\Windows\\CarbonBlack\\" + bat_filename)
self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename)
print("~ Launching "+ bat_filename +" to create C:\\windows\\carbonblack\\_memdump.zip")
compress_cmd = "C:\\Windows\\CarbonBlack\\" + bat_filename + " " + remote_file
self.lr_session.create_process(compress_cmd, wait_for_output=False, wait_for_completion=False)
if auto_collect_result:
print("~ waiting for {} to complete.".format(bat_filename))
self.wait_for_process_to_finish(bat_filename)
self.getFile_with_timeout("C:\\windows\\carbonblack\\_memdump.zip")
print("[!] If compression successful, _memdump.zip will exist, and {} should be deleted.".format(remote_file))
# here, they didn't want to use cb or custom compression, but they did want to auto collect results
if auto_collect_result:
self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}".format(remote_file))
return True | Customized function for dumping sensor memory.
:arguments cb_compress: If True, use CarbonBlack's built-in compression.
:arguments custom_compress: If True, compress with the lr_tools compress_file.bat helper instead.
:arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression
:arguments auto_collect_result: If True, wait for the memdump and compression to complete, then use cbapi to collect the file |
def validate_row(self, row):
"""
Ensure each element in the row matches the schema.
"""
clean_row = {}
if isinstance(row, (tuple, list)):
assert self.header_order, "No attribute order specified."
assert len(row) == len(self.header_order), \
"Row length does not match header length."
itr = zip(self.header_order, row)
else:
assert isinstance(row, dict)
itr = iteritems(row)
for el_name, el_value in itr:
if self.header_types[el_name] == ATTR_TYPE_DISCRETE:
clean_row[el_name] = int(el_value)
elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS:
clean_row[el_name] = float(el_value)
else:
clean_row[el_name] = el_value
return clean_row | Ensure each element in the row matches the schema. |
def _process_inbox_message(self, message: praw.models.Message):
"""
Process a reddit inbox message. Calls `func_message(message, *func_message_args)`.
:param message: Item to process
"""
self._func_message(message, *self._func_message_args) | Process a reddit inbox message. Calls `func_message(message, *func_message_args)`.
:param message: Item to process |
def query_pop(query, prefix, sep='.'):
'''Pop a prefix from a query string.
Parameters
----------
query : str
The query string
prefix : str
The prefix string to pop, if it exists
sep : str
The string to separate fields
Returns
-------
popped : str
`query` with a `prefix` removed from the front (if found)
or `query` if the prefix was not found
Examples
--------
>>> query_pop('Annotation.namespace', 'Annotation')
'namespace'
>>> query_pop('namespace', 'Annotation')
'namespace'
'''
terms = query.split(sep)
if terms[0] == prefix:
terms = terms[1:]
return sep.join(terms) | Pop a prefix from a query string.
Parameters
----------
query : str
The query string
prefix : str
The prefix string to pop, if it exists
sep : str
The string to separate fields
Returns
-------
popped : str
`query` with a `prefix` removed from the front (if found)
or `query` if the prefix was not found
Examples
--------
>>> query_pop('Annotation.namespace', 'Annotation')
'namespace'
>>> query_pop('namespace', 'Annotation')
'namespace' |
def wait_for_stable_cluster(
hosts,
jolokia_port,
jolokia_prefix,
check_interval,
check_count,
unhealthy_time_limit,
):
"""
Block the caller until the cluster can be considered stable.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer
"""
stable_counter = 0
max_checks = int(math.ceil(unhealthy_time_limit / check_interval))
for i in itertools.count():
partitions, brokers = read_cluster_status(
hosts,
jolokia_port,
jolokia_prefix,
)
if partitions or brokers:
stable_counter = 0
else:
stable_counter += 1
print(
"Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format(
p_count=partitions,
b_count=brokers,
stable=stable_counter,
limit=check_count,
))
if stable_counter >= check_count:
print("The cluster is stable")
return
if i >= max_checks:
raise WaitTimeoutException()
time.sleep(check_interval) | Block the caller until the cluster can be considered stable.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:param check_interval: the number of seconds it will wait between each check
:type check_interval: integer
:param check_count: the number of times the check should be positive before
restarting the next broker
:type check_count: integer
:param unhealthy_time_limit: the maximum number of seconds it will wait for
the cluster to become stable before exiting with error
:type unhealthy_time_limit: integer |
def _ensure_counter(self):
"""Ensure the sync counter is a valid non-dummy object."""
if not isinstance(self.sync_counter, self._SynchronizationManager):
self.sync_counter = self._SynchronizationManager() | Ensure the sync counter is a valid non-dummy object. |
def add(self, source_id, auth, validate=True):
""" Add one or more sets of authorization credentials to a Managed Source
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd
:param source_id: target Source ID
:type source_id: str
:param auth: An array of the source-specific authorization credential sets that you're adding.
:type auth: array of strings
:param validate: Allows you to suppress the validation of the authorization credentials, defaults to true.
:type validate: bool
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
params = {'id': source_id, 'auth': auth, 'validate': validate}
return self.request.post('add', params) | Add one or more sets of authorization credentials to a Managed Source
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd
:param source_id: target Source ID
:type source_id: str
:param auth: An array of the source-specific authorization credential sets that you're adding.
:type auth: array of strings
:param validate: Allows you to suppress the validation of the authorization credentials, defaults to true.
:type validate: bool
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` |
def filter(self, collection, data, **kwargs):
"""Filter given collection."""
ops = self.parse(data)
collection = self.apply(collection, ops, **kwargs)
return ops, collection | Filter given collection. |
def generate_cache_key(value):
"""
Generates a cache key for the *args and **kwargs
"""
if is_bytes(value):
return hashlib.md5(value).hexdigest()
elif is_text(value):
return generate_cache_key(to_bytes(text=value))
elif is_boolean(value) or is_null(value) or is_number(value):
return generate_cache_key(repr(value))
elif is_dict(value):
return generate_cache_key((
(key, value[key])
for key
in sorted(value.keys())
))
elif is_list_like(value) or isinstance(value, collections.abc.Generator):
return generate_cache_key("".join((
generate_cache_key(item)
for item
in value
)))
else:
raise TypeError("Cannot generate cache key for value {0} of type {1}".format(
value,
type(value),
)) | Generates a cache key for the *args and **kwargs |
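Usage sketch (assumes the module's type helpers such as is_dict and is_list_like are importable alongside the function): dict key order does not affect the key because keys are sorted before hashing, while sequence order does:
assert generate_cache_key({"a": 1, "b": 2}) == generate_cache_key({"b": 2, "a": 1})
assert generate_cache_key([1, 2]) != generate_cache_key([2, 1])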
def expire(key, seconds, host=None, port=None, db=None, password=None):
'''
Set a key's time to live in seconds
CLI Example:
.. code-block:: bash
salt '*' redis.expire foo 300
'''
server = _connect(host, port, db, password)
return server.expire(key, seconds) | Set a key's time to live in seconds
CLI Example:
.. code-block:: bash
salt '*' redis.expire foo 300 |
def download(model, direct=False, *pip_args):
"""
Download compatible model from default download path using pip. Model
can be shortcut, model name or, if --direct flag is set, full model name
with version. For direct downloads, the compatibility check will be skipped.
"""
dl_tpl = "{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}"
if direct:
components = model.split("-")
model_name = "".join(components[:-1])
version = components[-1]
dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args)
else:
shortcuts = get_json(about.__shortcuts__, "available shortcuts")
model_name = shortcuts.get(model, model)
compatibility = get_compatibility()
version = get_version(model_name, compatibility)
dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args)
if dl != 0: # if download subprocess doesn't return 0, exit
sys.exit(dl)
msg.good(
"Download and installation successful",
"You can now load the model via spacy.load('{}')".format(model_name),
)
# Only create symlink if the model is installed via a shortcut like 'en'.
# There's no real advantage over an additional symlink for en_core_web_sm
# and if anything, it's more error prone and causes more confusion.
if model in shortcuts:
try:
# Get package path here because link uses
# pip.get_installed_distributions() to check if model is a
# package, which fails if model was just installed via
# subprocess
package_path = get_package_path(model_name)
link(model_name, model, force=True, model_path=package_path)
except: # noqa: E722
# Dirty, but since spacy.download and the auto-linking is
# mostly a convenience wrapper, it's best to show a success
# message and loading instructions, even if linking fails.
msg.warn(
"Download successful but linking failed",
"Creating a shortcut link for '{}' didn't work (maybe you "
"don't have admin permissions?), but you can still load "
"the model via its full package name: "
"nlp = spacy.load('{}')".format(model, model_name),
) | Download compatible model from default download path using pip. Model
can be shortcut, model name or, if --direct flag is set, full model name
with version. For direct downloads, the compatibility check will be skipped. |
def shuffled_batches(self, batch_size):
""" Generate randomized batches of data - only sample whole trajectories """
if batch_size >= self.num_envs * self.num_steps:
yield self
else:
rollouts_in_batch = batch_size // self.num_steps
batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)
indices = list(range(self.num_envs))
np.random.shuffle(indices)
for sub_indices in np.array_split(indices, batch_splits):
yield Trajectories(
num_steps=self.num_steps,
num_envs=len(sub_indices),
# Dont use it in batches for a moment, can be uncommented later if needed
# environment_information=[x[sub_indices.tolist()] for x in self.environment_information],
environment_information=None,
transition_tensors={k: x[:, sub_indices] for k, x in self.transition_tensors.items()},
rollout_tensors={k: x[sub_indices] for k, x in self.rollout_tensors.items()},
# extra_data does not go into batches
) | Generate randomized batches of data - only sample whole trajectories |
def upload(self, filename, directory=None):
"""
Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
"""
filename = eval_path(filename)
if directory is None:
directory = self.downloads_directory
# First request
res1 = self._req_upload(filename, directory)
data1 = res1['data']
file_id = data1['file_id']
# Second request
res2 = self._req_file(file_id)
data2 = res2['data'][0]
data2.update(**data1)
return _instantiate_uploaded_file(self, data2) | Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File` |
def make_systemrestoreitem_originalfilename(original_filename, condition='contains', negate=False, preserve_case=False):
"""
Create a node for SystemRestoreItem/OriginalFileName
:return: An IndicatorItem represented as an Element node
"""
document = 'SystemRestoreItem'
search = 'SystemRestoreItem/OriginalFileName'
content_type = 'string'
content = original_filename
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | Create a node for SystemRestoreItem/OriginalFileName
:return: An IndicatorItem represented as an Element node |
def get_collection(self, event_collection):
"""
Extracts info about a collection using the Keen IO API. A master key must be set first.
:param event_collection: the name of the collection to retrieve info for
"""
url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version,
self.project_id, event_collection)
headers = utilities.headers(self.read_key)
response = self.fulfill(HTTPMethods.GET, url, headers=headers, timeout=self.get_timeout)
self._error_handling(response)
return response.json() | Extracts info about a collection using the Keen IO API. A master key must be set first.
:param event_collection: the name of the collection to retrieve info for |
def generate_contentinfo_from_folder(self, csvwriter, rel_path, filenames):
"""
Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
from ricecooker.utils.linecook import filter_filenames, filter_thumbnail_files, chan_path_from_rel_path
# WRITE TOPIC ROW
topicrow = self.channeldir_node_to_row( rel_path.split(os.path.sep) )
csvwriter.writerow(topicrow)
# WRITE CONTENT NODE ROWS
chan_path = chan_path_from_rel_path(rel_path, self.channeldir)
filenames_cleaned = filter_filenames(filenames)
# filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, self)
for filename in filenames_cleaned:
path_tuple = rel_path.split(os.path.sep)
path_tuple.append(filename)
filerow = self.channeldir_node_to_row(path_tuple)
csvwriter.writerow(filerow) | Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder. |