def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False):
"""
This converts the bedfile to bedpefile, assuming the reads are from CA.
"""
fp = must_open(bedfile)
fw = must_open(bedpefile, "w")
if pairsbedfile:
fwpairs = must_open(pairsbedfile, "w")
clones = defaultdict(list)
for row in fp:
b = BedLine(row)
name = b.accn
clonename = clone_name(name, ca=ca)
clones[clonename].append(b)
if matesfile:
fp = open(matesfile)
libraryline = next(fp)
# 'library bes 37896 126916'
lib, name, smin, smax = libraryline.split()
assert lib == "library"
smin, smax = int(smin), int(smax)
logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
format(name, smin, smax))
nbedpe = 0
nspan = 0
for clonename, blines in clones.items():
nlines = len(blines)
if nlines == 2:
a, b = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = b.seqid, b.start, b.end
outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
if strand:
outcols.extend([0, a.strand, b.strand])
print("\t".join(str(x) for x in outcols), file=fw)
nbedpe += 1
elif nlines == 1:
a, = blines
aseqid, astart, aend = a.seqid, a.start, a.end
bseqid, bstart, bend = 0, 0, 0
else: # More than two lines per pair
pass
if pairsbedfile:
start = min(astart, bstart) if bstart > 0 else astart
end = max(aend, bend) if bend > 0 else aend
if aseqid != bseqid:
continue
span = end - start + 1
if (not matesfile) or (smin <= span <= smax):
print("\t".join(str(x) for x in \
(aseqid, start - 1, end, clonename)), file=fwpairs)
nspan += 1
fw.close()
logging.debug("A total of {0} bedpe written to `{1}`.".\
format(nbedpe, bedpefile))
if pairsbedfile:
fwpairs.close()
logging.debug("A total of {0} spans written to `{1}`.".\
format(nspan, pairsbedfile))
def get_repository(self, entity_cls):
    """Retrieve a Repository for the Model with a live connection"""
    entity_record = self._get_entity_by_class(entity_cls)
    provider = self.get_provider(entity_record.provider_name)
    return provider.get_repository(entity_record.entity_cls)
def list_upgrades(refresh=True, root=None, **kwargs):
'''
List all available package upgrades on this system
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
if refresh:
refresh_db(root)
ret = dict()
cmd = ['list-updates']
if 'fromrepo' in kwargs:
repo_name = kwargs['fromrepo']
if not isinstance(repo_name, six.string_types):
repo_name = six.text_type(repo_name)
cmd.extend(['--repo', repo_name])
for update_node in __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('update'):
if update_node.getAttribute('kind') == 'package':
ret[update_node.getAttribute('name')] = update_node.getAttribute('edition')
return ret
def update_device(self, device_id, **kwargs):
"""Update existing device in catalog.
.. code-block:: python
existing_device = api.get_device(...)
updated_device = api.update_device(
existing_device.id,
certificate_fingerprint = "something new"
)
:param str device_id: The ID of the device to update (Required)
:param obj custom_attributes: Up to 5 custom JSON attributes
:param str description: The description of the device
:param str name: The name of the device
:param str alias: The alias of the device
:param str device_type: The endpoint type of the device - e.g. if the device is a gateway
:param str host_gateway: The endpoint_name of the host gateway, if appropriate
:param str certificate_fingerprint: Fingerprint of the device certificate
:param str certificate_issuer_id: ID of the issuer of the certificate
:returns: the updated device object
:rtype: Device
"""
api = self._get_api(device_directory.DefaultApi)
device = Device._create_request_map(kwargs)
body = DeviceDataPostRequest(**device)
return Device(api.device_update(device_id, body))
def plot_trajectory_with_elegans(
obs, width=350, height=350, config=None, grid=True, wireframe=False,
max_count=10, camera_position=(-22, 23, 32), camera_rotation=(-0.6, 0.5, 0.6),
plot_range=None):
"""
Generate a plot from a received instance of TrajectoryObserver and show it
in an IPython notebook.
Parameters
----------
obs : TrajectoryObserver
TrajectoryObserver to render.
width : float, default 350
Width of the plotting area.
height : float, default 350
Height of the plotting area.
config : dict, default {}
Dict to configure default colors. Its values are colors unique
to each particle. The dictionary will be updated during this plot.
Colors included in the config dict will never be used for other particles.
camera_position : tuple, default (-22, 23, 32)
camera_rotation : tuple, default (-0.6, 0.5, 0.6)
Initial position and rotation of camera.
plot_range : tuple, default None
Range for plotting. A triplet of pairs suggesting (rangex, rangey, rangez).
If None, the minimum volume containing all the trajectories is used.
"""
config = config or {}
from IPython.core.display import display, HTML
color_scale = default_color_scale(config=config)
plots = []
xmin, xmax, ymin, ymax, zmin, zmax = None, None, None, None, None, None
data = obs.data()
if max_count is not None and len(data) > max_count:
data = random.sample(data, max_count)
for i, y in enumerate(data):
xarr, yarr, zarr = [], [], []
for pos in y:
xarr.append(pos[0])
yarr.append(pos[1])
zarr.append(pos[2])
if xmin is None:
if len(y) > 0:
xmin, xmax = min(xarr), max(xarr)
ymin, ymax = min(yarr), max(yarr)
zmin, zmax = min(zarr), max(zarr)
else:
xmin, xmax = min([xmin] + xarr), max([xmax] + xarr)
ymin, ymax = min([ymin] + yarr), max([ymax] + yarr)
zmin, zmax = min([zmin] + zarr), max([zmax] + zarr)
name = str(i + 1)
c = color_scale.get_color(name)
plots.append({
'type': 'Line',
'data': {'x': xarr, 'y': yarr, 'z': zarr},
'options': {
'name': name,
'thickness': 2, # XXX: 'thikness' doesn't work on Windows
'colors': [c, c]}
})
if plot_range is None:
if xmin is None:
xmin, xmax, ymin, ymax, zmin, zmax = 0, 1, 0, 1, 0, 1
max_length = max(xmax - xmin, ymax - ymin, zmax - zmin)
rangex = [(xmin + xmax - max_length) * 0.5,
(xmin + xmax + max_length) * 0.5]
rangey = [(ymin + ymax - max_length) * 0.5,
(ymin + ymax + max_length) * 0.5]
rangez = [(zmin + zmax - max_length) * 0.5,
(zmin + zmax + max_length) * 0.5]
wrange = {'x': rangex, 'y': rangey, 'z': rangez}
else:
wrange = __get_range_of_trajectories(None, plot_range)
model = {
'plots': plots,
'options': {
'world_width': width,
'world_height': height,
'range': wrange,
'autorange': False,
'grid': grid,
'save_image': True
}
}
if wireframe:
model['options']['space_mode'] = 'wireframe'
model_id = '"viz' + str(uuid.uuid4()) + '"'
display(HTML(generate_html(
{'model': json.dumps(model), 'model_id': model_id,
'px': camera_position[0], 'py': camera_position[1], 'pz': camera_position[2],
'rx': camera_rotation[0], 'ry': camera_rotation[1], 'rz': camera_rotation[2]},
'templates/particles.tmpl')))
def load_configuration_from_text_file(register, configuration_file):
'''Loading configuration from text files to register object
Parameters
----------
register : pybar.fei4.register object
configuration_file : string
Full path (directory and filename) of the configuration file. If name is not given, reload configuration from file.
'''
logging.info("Loading configuration: %s" % configuration_file)
register.configuration_file = configuration_file
config_dict = parse_global_config(register.configuration_file)
if 'Flavor' in config_dict:
flavor = config_dict.pop('Flavor').lower()
if register.flavor:
pass
else:
register.init_fe_type(flavor)
else:
if register.flavor:
pass
else:
raise ValueError('Flavor not specified')
if 'Chip_ID' in config_dict:
chip_id = config_dict.pop('Chip_ID')
if register.chip_address:
pass
else:
register.set_chip_address(chip_address=chip_id & 0x7, broadcast=True if chip_id & 0x8 else False)
elif 'Chip_Address' in config_dict:
chip_address = config_dict.pop('Chip_Address')
if register.chip_address:
pass
else:
register.set_chip_address(chip_address)
else:
if register.chip_id_initialized:
pass
else:
raise ValueError('Chip address not specified')
global_registers_configured = []
pixel_registers_configured = []
for key in config_dict.keys():
value = config_dict.pop(key)
if key in register.global_registers:
register.set_global_register_value(key, value)
global_registers_configured.append(key)
elif key in register.pixel_registers:
register.set_pixel_register_value(key, value)
pixel_registers_configured.append(key)
elif key in register.calibration_parameters:
register.calibration_parameters[key] = value
else:
register.miscellaneous[key] = value
global_registers = register.get_global_register_attributes('name', readonly=False)
pixel_registers = register.pixel_registers.keys()
global_registers_not_configured = set(global_registers).difference(global_registers_configured)
pixel_registers_not_configured = set(pixel_registers).difference(pixel_registers_configured)
if global_registers_not_configured:
logging.warning("Following global register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in global_registers_not_configured)))
if pixel_registers_not_configured:
logging.warning("Following pixel register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in pixel_registers_not_configured)))
if register.miscellaneous:
logging.warning("Found following unknown parameter(s): {}".format(', '.join('\'' + parameter + '\'' for parameter in register.miscellaneous.iterkeys())))
def save_firefox_profile(self, remove_old=False):
"""Function to save the firefox profile to the permanent one"""
self.logger.info("Saving profile from %s to %s" % (self._profile.path, self._profile_path))
if remove_old:
if os.path.exists(self._profile_path):
try:
shutil.rmtree(self._profile_path)
except OSError:
pass
shutil.copytree(os.path.join(self._profile.path), self._profile_path,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
else:
for item in os.listdir(self._profile.path):
if item in ["parent.lock", "lock", ".parentlock"]:
continue
s = os.path.join(self._profile.path, item)
d = os.path.join(self._profile_path, item)
if os.path.isdir(s):
shutil.copytree(s, d,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
else:
shutil.copy2(s, d)
with open(os.path.join(self._profile_path, self._LOCAL_STORAGE_FILE), 'w') as f:
f.write(dumps(self.get_local_storage()))
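The first branch above is just shutil.copytree with ignore_patterns to skip the Firefox lock files. A minimal self-contained sketch of that pattern, using invented temporary directories rather than a real profile:

# Sketch of the copytree + ignore_patterns idea used above; the directory and
# file names here are made up for illustration only.
import os
import shutil
import tempfile

src = tempfile.mkdtemp(prefix="profile_src_")
dst = os.path.join(tempfile.mkdtemp(prefix="profile_dst_"), "copy")

# Populate the fake profile with one real file and one lock file.
open(os.path.join(src, "prefs.js"), "w").close()
open(os.path.join(src, "parent.lock"), "w").close()

# Copy everything except the lock files, mirroring the call in the method above.
shutil.copytree(src, dst,
                ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))

print(os.listdir(dst))  # ['prefs.js'] -- the lock file was skipped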
def _compute_betas_gwr(y, x, wi):
    """
    Compute MLE coefficients using the IWLS routine.
    Methods: p189, Iteratively (Re)weighted Least Squares (IWLS),
    Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
    Geographically weighted regression: the analysis of spatially varying relationships.
    """
    xT = (x * wi).T
    xtx = np.dot(xT, x)
    xtx_inv_xt = linalg.solve(xtx, xT)
    betas = np.dot(xtx_inv_xt, y)
    return betas, xtx_inv_xt
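The routine above is a single weighted least-squares solve. A quick sanity check with synthetic data, assuming np and linalg stand for numpy and scipy.linalg as the module appears to import them:

# Hedged usage sketch: the same weighted least-squares step on synthetic data.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n = 50
x = np.hstack([np.ones((n, 1)), rng.rand(n, 2)])   # design matrix with intercept
true_betas = np.array([[1.0], [2.0], [-0.5]])
y = x.dot(true_betas) + 0.01 * rng.randn(n, 1)
wi = np.ones((n, 1))                                # uniform spatial weights

xT = (x * wi).T
xtx = np.dot(xT, x)
xtx_inv_xt = linalg.solve(xtx, xT)
betas = np.dot(xtx_inv_xt, y)
print(betas.ravel())  # close to [1.0, 2.0, -0.5]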
def enable_audit_device(self, device_type, description=None, options=None, path=None):
"""Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
)
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the time series id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
# No need to check
if not timeseries_ids:
return
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the timeSeries variable')
timeseries_variable = timeseries_ids[0]
test_ctx.assert_true(
getattr(timeseries_variable, 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
return test_ctx.to_result()
def _AlignDecodedDataOffset(self, decoded_data_offset):
"""Aligns the encoded file with the decoded data offset.
Args:
decoded_data_offset (int): decoded data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decoder = self._GetDecoder()
self._decoded_data = b''
encoded_data_offset = 0
encoded_data_size = self._file_object.get_size()
while encoded_data_offset < encoded_data_size:
read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encoded_data_offset += read_count
if decoded_data_offset < self._decoded_data_size:
self._decoded_data_offset = decoded_data_offset
break
decoded_data_offset -= self._decoded_data_size
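The loop above keeps decoding fixed-size encoded chunks until the requested decoded offset falls inside the data decoded so far. A standalone Python 3 sketch of the same idea, using base64 purely as a stand-in encoding; none of the names below belong to the original class:

import base64
import io

decoded_original = bytes(range(256)) * 4
encoded_stream = io.BytesIO(base64.b64encode(decoded_original))
CHUNK = 64  # multiple of 4, so each chunk is independently base64-decodable

def align_to_decoded_offset(stream, target_offset):
    stream.seek(0)
    decoded_so_far = 0
    while True:
        chunk = stream.read(CHUNK)
        if not chunk:
            return None  # offset beyond end of data
        decoded = base64.b64decode(chunk)
        if target_offset < decoded_so_far + len(decoded):
            # Offset falls inside this decoded block; return the local offset.
            return decoded, target_offset - decoded_so_far
        decoded_so_far += len(decoded)

block, offset_in_block = align_to_decoded_offset(encoded_stream, 200)
assert block[offset_in_block] == decoded_original[200]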
def notify_slack(title, content, attachment_color="#4bb543", short_threshold=40, token=None,
channel=None, mention_user=None, **kwargs):
"""
Sends a slack notification and returns *True* on success. The communication with the slack API
might have some delays and is therefore handled by a thread. The format of the notification
depends on *content*. If it is a string, a simple text notification is sent. Otherwise, it
should be a dictionary whose fields are used to build a message attachment with two-column
formatting.
"""
# test import
import slackclient # noqa: F401
cfg = Config.instance()
# get default token and channel
if not token:
token = cfg.get_expanded("notifications", "slack_token")
if not channel:
channel = cfg.get_expanded("notifications", "slack_channel")
if not token or not channel:
logger.warning("cannot send Slack notification, token ({}) or channel ({}) empty".format(
token, channel))
return False
# append the user to mention to the title
# unless explicitly set to empty string
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "slack_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
# request data for the API call
request = {
"channel": channel,
"as_user": True,
"parse": "full",
}
# standard or attachment content?
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
# content is a dict, send its data as an attachment
request["text"] = "{} {}".format(title, mention_text)
request["attachments"] = at = {
"color": attachment_color,
"fields": [],
"fallback": "{}{}\n\n".format(title, mention_text),
}
# fill the attachment fields and extend the fallback
for key, value in content.items():
at["fields"].append({
"title": key,
"value": value,
"short": len(value) <= short_threshold,
})
at["fallback"] += "_{}_: {}\n".format(key, value)
# extend by arbitrary kwargs
request.update(kwargs)
# threaded, non-blocking API communication
thread = threading.Thread(target=_notify_slack, args=(token, request))
thread.start()
return True
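When content is a dict, the function builds a Slack attachment with one field per key and a short flag derived from the value length. A self-contained sketch of that conversion, with example values only and no API call:

short_threshold = 40
title, mention_text = "Job finished", " (@someone)"
content = {"task": "Reconstruction", "status": "done", "duration": "2h 13m"}

attachment = {
    "color": "#4bb543",
    "fields": [],
    "fallback": "{}{}\n\n".format(title, mention_text),
}
for key, value in content.items():
    attachment["fields"].append({
        "title": key,
        "value": value,
        "short": len(value) <= short_threshold,  # two-column layout for short values
    })
    attachment["fallback"] += "_{}_: {}\n".format(key, value)

print(attachment["fields"][0])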
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise.
"""
location = getattr(path_spec, 'location', None)
if location is None:
return False
is_device = False
if platform.system() == 'Windows':
# Note that os.path.exists() returns False for Windows device files so
# instead use libsmdev to do the check.
try:
is_device = pysmdev.check_device(location)
except IOError as exception:
# Since pysmdev will raise IOError when it has no access to the device
# we check if the exception message contains ' access denied ' and
# return true.
# Note that exception.message no longer works in Python 3.
exception_string = str(exception)
if not isinstance(exception_string, py2to3.UNICODE_TYPE):
exception_string = py2to3.UNICODE_TYPE(
exception_string, errors='replace')
if ' access denied ' in exception_string:
is_device = True
# Note that os.path.exists() returns False for broken symbolic links hence
# an additional check using os.path.islink() is necessary.
return is_device or os.path.exists(location) or os.path.islink(location)
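The final return combines os.path.exists with os.path.islink because exists() reports False for a dangling symbolic link. A quick demonstration, assuming a platform where unprivileged symlink creation works (e.g. Linux or macOS):

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, "missing_target")
link = os.path.join(workdir, "dangling_link")
os.symlink(target, link)  # target never created, so the link is broken

print(os.path.exists(link))   # False -- exists() follows the link
print(os.path.islink(link))   # True  -- the link entry itself is there
print(os.path.exists(link) or os.path.islink(link))  # True, as in the code above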
def stft(func=None, **kwparams):
"""
Short Time Fourier Transform block processor / phase vocoder wrapper.
This function can be used in many ways:
* Directly as a signal processor builder, wrapping a spectrum block/grain
processor function;
* Directly as a decorator to a block processor;
* Called without the ``func`` parameter for a partial evaluation style
changing the defaults.
See the examples below for more information about these use cases.
The resulting function performs a full block-by-block analysis/synthesis
phase vocoder keeping this sequence of actions:
1. Blockenize the signal with the given ``size`` and ``hop``;
2. Lazily apply the given ``wnd`` window to each block;
3. Perform the 5 actions calling their functions in order:
a. ``before``: Pre-processing;
b. ``transform``: A transform like the FFT;
c. ``func``: the positional parameter with the single block processor;
d. ``inverse_transform``: inverse FFT;
e. ``after``: Post-processing.
4. Overlap-add with the ``ola`` overlap-add strategy. The given ``ola``
would deal with its own window application and normalization.
Any parameter from steps 3 and 4 can be set to ``None`` to skip it from
the full process, without changing the other [sub]steps. The parameters
defaults are based on the Numpy FFT subpackage.
Parameters
----------
func :
The block/grain processor function that receives a transformed block in
the frequency domain (the ``transform`` output) and should return the
processed data (it will be the first ``inverse_transform`` input). This
parameter shouldn't appear when this function is used as a decorator.
size :
Block size for the STFT process, in samples.
hop :
Duration in samples between two blocks. Defaults to the ``size`` value.
transform :
Function that receives the windowed block (in time domain) and the
``size`` as two positional inputs and should return the block (in
frequency domain). Defaults to ``numpy.fft.rfft``, which outputs a
Numpy 1D array with length equals to ``size // 2 + 1``.
inverse_transform :
Function that receives the processed block (in frequency domain) and the
``size`` as two positional inputs and should return the block (in
time domain). Defaults to ``numpy.fft.irfft``.
wnd :
Window function to be called as ``wnd(size)`` or window iterable with
length equals to ``size``. The windowing/apodization values are used
before taking the FFT of each block. Defaults to None, which means no
window should be applied (same behavior of a rectangular window).
before :
Function to be applied just before taking the transform, after the
windowing. Defaults to the ``numpy.fft.ifftshift``, which, together with
the ``after`` default, puts the time reference at the ``size // 2``
index of the block, centralizing it for the FFT (e.g. blocks
``[0, 1, 0]`` and ``[0, 0, 1, 0]`` would have zero phase). To disable
this realignment, just change both ``before=None`` and ``after=None``
keywords.
after :
Function to be applied just after the inverse transform, before calling
the overlap-add (as well as before its windowing, if any). Defaults to
the ``numpy.fft.fftshift`` function, which undoes the changes done by the
default ``before`` pre-processing for block phase alignment. To avoid
the default time-domain realignment, set both ``before=None`` and
``after=None`` keywords.
ola :
Overlap-add strategy. Uses the ``overlap_add`` default strategy when
not given. The strategy should allow at least size and hop keyword
arguments, besides a first positional argument for the iterable with
blocks. If ``ola=None``, the result from using the STFT processor will be
the ``Stream`` of blocks that would be the overlap-add input.
ola_* :
Extra keyword parameters for the overlap-add strategy, if any. The extra
``ola_`` prefix is removed when calling it. See the overlap-add strategy
docs for more information about the valid parameters.
Returns
-------
A function with the same parameters above, besides ``func``, which is
replaced by the signal input (if func was given). The parameters used when
building the function should be seen as defaults that can be changed when
calling the resulting function with the respective keyword arguments.
Examples
--------
Let's process something:
>>> my_signal = Stream(.1, .3, -.1, -.3, .5, .4, .3)
Wrapping directly the processor function:
>>> processor_w = stft(abs, size=64)
>>> sig = my_signal.copy() # Any iterable
>>> processor_w(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> peek200_w = _.peek(200) # Needs Numpy
>>> type(peek200_w[0]).__name__ # Result is a signal (numpy.float64 data)
'float64'
Keyword parameters in a partial evaluation style (can be reassigned):
>>> stft64 = stft(size=64) # Same to ``stft`` but with other defaults
>>> processor_p = stft64(abs)
>>> sig = my_signal.copy() # Any iterable
>>> processor_p(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w # This should do the same thing
True
As a decorator, this time with other windowing configuration:
>>> stft64hann = stft64(wnd=window.hann, ola_wnd=window.hann)
>>> @stft64hann # stft(...) can also be used as an anonymous decorator
... def processor_d(blk):
... return abs(blk)
>>> processor_d(sig) # This leads to a different result
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w
False
You can also use other iterables as input, and keep the parameters to be
passed afterwards, as well as change transform calculation:
>>> stft_no_zero_phase = stft(before=None, after=None)
>>> stft_no_wnd = stft_no_zero_phase(ola=overlap_add.list, ola_wnd=None,
... ola_normalize=False)
>>> on_blocks = stft_no_wnd(transform=None, inverse_transform=None)
>>> processor_a = on_blocks(reversed, hop=4) # Reverse
>>> processor_a([1, 2, 3, 4, 5], size=4, hop=2)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # From blocks [1, 2, 3, 4] and [3, 4, 5, 0.0]
[4.0, 3.0, 2.0, 6, 4, 3]
>>> processor_a([1, 2, 3, 4, 5], size=4) # Default hop instead
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # No overlap, blocks [1, 2, 3, 4] and [5, 0.0, 0.0, 0.0]
[4, 3, 2, 1, 0.0, 0.0, 0.0, 5]
>>> processor_a([1, 2, 3, 4, 5]) # Size was never given
Traceback (most recent call last):
...
TypeError: Missing 'size' argument
For analysis only, one can set ``ola=None``:
>>> from numpy.fft import ifftshift # [1, 2, 3, 4, 5] -> [3, 4, 5, 1, 2]
>>> analyzer = stft(ifftshift, ola=None, size=8, hop=2)
>>> sig = Stream(1, 0, -1, 0) # A pi/2 rad/sample cosine signal
>>> result = analyzer(sig)
>>> result
<audiolazy.lazy_stream.Stream object at 0x...>
Let's see the result contents. That processing "rotates" the frequencies,
converting the original ``[0, 0, 4, 0, 0]`` real FFT block to a
``[4, 0, 0, 0, 0]`` block, which means the block cosine was moved to
a DC-only signal keeping original energy/integral:
>>> result.take()
array([ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
>>> result.take() # From [0, 0, -4, 0, 0] to [-4, 0, 0, 0, 0]
array([-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
Note
----
Parameters should be passed as keyword arguments. The only exception
is ``func`` for this function and ``sig`` for the returned function,
which are always the first positional argument, and also the one that
shouldn't appear when using this function as a decorator.
Hint
----
1. When using Numpy FFT, one can keep data in place and return the
changed input block to save time;
2. Actually, there's nothing in this function that imposes FFT or Numpy
besides the default values. One can still use this even for other
transforms that have nothing to do with the Fourier Transform.
See Also
--------
overlap_add :
Overlap-add algorithm for an iterable (e.g. a Stream instance) of blocks
(sequences such as lists or Numpy arrays). It's also a StrategyDict.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
"""
# Using as a decorator or to "replicate" this function with other defaults
if func is None:
cfi = chain.from_iterable
mix_dict = lambda *dicts: dict(cfi(iteritems(d) for d in dicts))
result = lambda f=None, **new_kws: stft(f, **mix_dict(kwparams, new_kws))
return result
# Using directly
@tostream
@wraps(func)
def wrapper(sig, **kwargs):
kws = kwparams.copy()
kws.update(kwargs)
if "size" not in kws:
raise TypeError("Missing 'size' argument")
if "hop" in kws and kws["hop"] > kws["size"]:
raise ValueError("Hop value can't be higher than size")
blk_params = {"size": kws.pop("size")}
blk_params["hop"] = kws.pop("hop", None)
ola_params = blk_params.copy() # Size and hop
blk_params["wnd"] = kws.pop("wnd", None)
ola = kws.pop("ola", overlap_add)
class NotSpecified(object):
pass
for name in ["transform", "inverse_transform", "before", "after"]:
blk_params[name] = kws.pop(name, NotSpecified)
for k, v in kws.items():
if k.startswith("ola_"):
if ola is not None:
ola_params[k[len("ola_"):]] = v
else:
raise TypeError("Extra '{}' argument with no overlap-add "
"strategy".format(k))
else:
raise TypeError("Unknown '{}' extra argument".format(k))
def blk_gen(size, hop, wnd, transform, inverse_transform, before, after):
if transform is NotSpecified:
from numpy.fft import rfft as transform
if inverse_transform is NotSpecified:
from numpy.fft import irfft as inverse_transform
if before is NotSpecified:
from numpy.fft import ifftshift as before
if after is NotSpecified:
from numpy.fft import fftshift as after
# Find the right windowing function to be applied
if callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Iterable):
wnd = list(wnd)
if len(wnd) != size:
raise ValueError("Incompatible window size")
elif wnd is not None:
raise TypeError("Window should be an iterable or a callable")
# Pad size lambdas
trans = transform and (lambda blk: transform(blk, size))
itrans = inverse_transform and (lambda blk:
inverse_transform(blk, size))
# Continuation style calling
funcs = [f for f in [before, trans, func, itrans, after]
if f is not None]
process = lambda blk: reduce(lambda data, f: f(data), funcs, blk)
if wnd is None:
for blk in Stream(sig).blocks(size=size, hop=hop):
yield process(blk)
else:
blk_with_wnd = wnd[:]
mul = operator.mul
for blk in Stream(sig).blocks(size=size, hop=hop):
blk_with_wnd[:] = xmap(mul, blk, wnd)
yield process(blk_with_wnd)
if ola is None:
return blk_gen(**blk_params)
else:
return ola(blk_gen(**blk_params), **ola_params)
return wrapper
def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
""" Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature.
"""
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures:
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues)
def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
"""Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
"""
# pylint: disable=invalid-name
for name, value in inspect.getmembers(section_schema):
if name.startswith("__") or value is None:
continue # pragma: no cover
elif inspect.isclass(value) and deep:
# -- CASE: class => SELF-CALL (recursively).
# pylint: disable= bad-continuation
cls = value
for name, value in select_params_from_section_schema(cls,
param_class=param_class, deep=True):
yield (name, value)
elif isinstance(value, param_class):
yield (name, value)
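The generator walks the schema class with inspect.getmembers and recurses into nested classes when deep=True. A toy sketch with a stand-in Param class; the real Param and schema classes come from the surrounding project, so everything here is illustrative:

import inspect

class Param(object):
    def __init__(self, default=None):
        self.default = default

class ServerSection(object):
    host = Param("localhost")
    port = Param(8080)

    class Advanced(object):          # nested section, reached only with deep=True
        timeout = Param(30)

def select_params(section_schema, param_class=Param, deep=False):
    for name, value in inspect.getmembers(section_schema):
        if name.startswith("__") or value is None:
            continue
        elif inspect.isclass(value) and deep:
            for inner in select_params(value, param_class=param_class, deep=True):
                yield inner
        elif isinstance(value, param_class):
            yield (name, value)

print([name for name, _ in select_params(ServerSection, deep=True)])
# ['timeout', 'host', 'port'] -- nested params are included when deep=True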
def _to_repeatmasker_string(pairwise_alignment, column_width=DEFAULT_COL_WIDTH,
m_name_width=DEFAULT_MAX_NAME_WIDTH):
"""
generate a RepeatMasker formatted representation of this pairwise alignment.
:param column_width: number of characters to output per line of alignment
:param m_name_width: truncate names on alignment lines to this length
(set to None for no truncation)
"""
s1 = pairwise_alignment.s1
s2 = pairwise_alignment.s2
s1_neg = not s1.is_positive_strand()
s2_neg = not s2.is_positive_strand()
size = pairwise_alignment.size()
# figure out the complement column
s1_comp = "C" if s1_neg else " "
s2_comp = "C" if s2_neg else " "
# figure out the maximum name length, so we can size that column properly;
# pre-compute the space-padded names too
s1_len = len(s1.name)
s2_len = len(s2.name)
f_len = max(s1_len, s2_len)
if m_name_width is not None:
f_len = min(f_len, m_name_width)
s1_n = s1.name[:f_len] + (' ' * (f_len - s1_len))
s2_n = s2.name[:f_len] + (' ' * (f_len - s2_len))
# figure out the max width for the coordinates column; we use size of the
# alignment here rather than ungapped coordinates because it's an upper
# bound and easier to compute (i.e. we already know it for sure).
s1_line_end_num = (s1.end if s1_neg else s1.start - 1)
s2_line_end_num = (s2.end if s2_neg else s2.start - 1)
max_num_len = max(len(str(s1.start + size)), len(str(s2.start + size)))
res = "" # our result
i = 0 # how much of the full, gapped alignment, has been output so far?
res += _get_repeat_masker_header(pairwise_alignment) + "\n\n"
while i < len(pairwise_alignment.s1):
# keep track of how much of each sequence we've output
s1_sub = s1.gapped_relative_subsequence(i + 1, min(i + column_width + 1, len(s1) + 1))
s2_sub = s2.gapped_relative_subsequence(i + 1, min(i + column_width + 1, len(s2) + 1))
s1_ug_len = s1_sub.ungapped_len
s2_ug_len = s2_sub.ungapped_len
s1_line_start_num = (s1_line_end_num - 1 if s1_neg
else s1_line_end_num + 1)
s1_line_end_num = (s1_line_start_num - s1_ug_len + 1 if s1_neg
else s1_line_start_num + s1_ug_len - 1)
s2_line_start_num = (s2_line_end_num - 1 if s2_neg
else s2_line_end_num + 1)
s2_line_end_num = (s2_line_start_num - s2_ug_len + 1 if s2_neg
else s2_line_start_num + s2_ug_len - 1)
# output sequence one
res += (s1_comp + " " + s1_n + " ")
s1_line_start_num_str = str(s1_line_start_num)
s1_num_padding = max_num_len - len(s1_line_start_num_str)
res += (' ' * s1_num_padding) + s1_line_start_num_str + " "
res += pairwise_alignment.s1[i:i + column_width] + " "
res += str(s1_line_end_num) + "\n"
# output the annotation string, if we have one; needs to be padded by the
# number of char in the name col (f_len), the number in the coordinate
# col (max_num_len), the one char in the complement columns, and the
# three spaces that are used as column separators for those.
if ANNOTATION_KEY in pairwise_alignment.meta:
res += (((f_len + max_num_len) * ' ') + " " +
pairwise_alignment.meta[ANNOTATION_KEY][i:i + column_width] + "\n")
# output sequence two
res += (s2_comp + " " + s2_n + " ")
s2_line_start_num_str = str(s2_line_start_num)
s2_num_padding = max_num_len - len(s2_line_start_num_str)
res += (' ' * s2_num_padding) + s2_line_start_num_str + " "
res += pairwise_alignment.s2[i:i + column_width] + " "
res += str(s2_line_end_num) + "\n"
res += "\n"
i += column_width
# output any metadata key-value pairs that aren't known to us.
if pairwise_alignment.meta is not None:
for k in pairwise_alignment.meta:
if k not in KNOWN_KEYS:
if k is ROUNDTRIP_KEY:
res += (pairwise_alignment.meta[k] + "\n")
else:
res += (k + " = " + str(pairwise_alignment.meta[k]) + "\n")
# remove any trailing whitespace
res = res.strip()
return res
def _make_y_title(self):
"""Make the Y-Axis title"""
if self._y_title:
yc = self.margin_box.top + self.view.height / 2
for i, title_line in enumerate(self._y_title, 1):
text = self.svg.node(
self.nodes['title'],
'text',
class_='title',
x=self._legend_at_left_width,
y=i * (self.style.title_font_size + self.spacing) + yc
)
text.attrib['transform'] = "rotate(%d %f %f)" % (
-90, self._legend_at_left_width, yc
)
text.text = title_line
def check_bottleneck(text):
"""Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61
"""
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1)
def beacon_link(variant_obj, build=None):
    """Compose link to Beacon Network."""
    build = build or 37
    url_template = ("https://beacon-network.org/#/search?pos={this[position]}&"
                    "chrom={this[chromosome]}&allele={this[alternative]}&"
                    "ref={this[reference]}&rs=GRCh37")
    # beacon does not support build 38 at the moment
    # if build == '38':
    #     url_template = ("https://beacon-network.org/#/search?pos={this[position]}&"
    #                     "chrom={this[chromosome]}&allele={this[alternative]}&"
    #                     "ref={this[reference]}&rs=GRCh38")
    return url_template.format(this=variant_obj)
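Since the link is built with str.format item access, variant_obj only needs to support [] lookups. A usage sketch of the function above with a plain dict and made-up field values:

variant = {
    "position": 1234567,
    "chromosome": "7",
    "alternative": "T",
    "reference": "C",
}
print(beacon_link(variant))
# https://beacon-network.org/#/search?pos=1234567&chrom=7&allele=T&ref=C&rs=GRCh37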
def do_ams_put(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
'''Do an AMS HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
rformat (str): A required JSON Accept Format.
ds_min_version (str): A required DS MIN Version.
Returns:
HTTP response. JSON body.
'''
min_ds = dsversion_min
content_acceptformat = json_acceptformat
if rformat == "json_only":
min_ds = ds_min_version
content_acceptformat = json_only_acceptformat
headers = {"Content-Type": content_acceptformat,
"DataServiceVersion": min_ds,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)
# AMS response to the first call can be a redirect,
# so we handle it here to make it transparent for the caller...
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.put(redirected_url, data=body, headers=headers)
return response
def expand_macros(raw_text, macros):
"""
this gets called before the sakefile is parsed. it looks for
macros defined anywhere in the sakefile (the start of the line
is '#!') and then replaces all occurrences of '$variable' with the
value defined in the macro. it then returns the contents of the
file with the macros expanded.
"""
includes = {}
result = []
pattern = re.compile("#!\s*(\w+)\s*(?:(\??\s*)=\s*(.*$)|or\s*(.*))", re.UNICODE)
ipattern = re.compile("#<\s*(\S+)\s*(optional|or\s+(.+))?$", re.UNICODE)
for line in raw_text.split("\n"):
line = string.Template(line).safe_substitute(macros)
# note that the line is appended to result before it is checked for macros
# this prevents macros expanding into themselves
result.append(line)
if line.startswith("#!"):
match = pattern.match(line)
try:
var, opt, val, or_ = match.group(1, 2, 3, 4)
except:
raise InvalidMacroError("Failed to parse macro {}\n".format(line))
if or_:
if var not in macros:
raise InvalidMacroError("Macro {} is not defined: {}\n".format(var, or_))
elif not (opt and var in macros):
macros[var] = val
elif line.startswith("#<"):
match = ipattern.match(line)
try:
filename = match.group(1)
except:
error("Failed to parse include {}\n".format(line))
sys.exit(1)
try:
with io.open(filename, 'r') as f:
includes[filename] = expand_macros(f.read(), macros)
except IOError:
if match.group(2):
if match.group(2).startswith('or '):
sprint(match.group(3))
else:
error("Nonexistent include {}\n".format(filename))
sys.exit(1)
return "\n".join(result), includes
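Macros declared on '#!' lines are collected into a dict and every later '$name' reference on following lines is expanded with string.Template.safe_substitute. A minimal standalone sketch of that two-step idea with an invented sample text:

import re
import string

sample = "\n".join([
    "#! outdir = build",
    "compile:",
    "    output: $outdir/app.o",
])

macros = {}
pattern = re.compile(r"#!\s*(\w+)\s*\??\s*=\s*(.*)$")
expanded = []
for line in sample.split("\n"):
    line = string.Template(line).safe_substitute(macros)
    expanded.append(line)             # append before matching, as in the function above
    match = pattern.match(line)
    if match:
        macros[match.group(1)] = match.group(2)

print("\n".join(expanded))
# The `$outdir` reference on the later line becomes `build/app.o`.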
def add_group_member(self, grp_name, user):
"""
Add the given user to the named group.
Both group and user must already exist for this to succeed.
Args:
grp_name (string): Name of group.
user (string): User to add to group.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
self.project_service.add_group_member(grp_name, user)
def url(self):
    """The URL as a string of the resource."""
    if not self._url[2].endswith('/'):
        self._url[2] += '/'
    return RestURL.url.__get__(self)
def _set_session_style(self, v, load=False):
"""
Setter method for session_style, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_style (session-reservation-style)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_style is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_style() directly.
YANG Description: Style of session
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'reservation-style-shared-explicit': {'value': 2}, u'reservation-style-wildcard-filter': {'value': 0}, u'reservation-style-unknown': {'value': 3}, u'reservation-style-fixed-filter': {'value': 1}},), is_leaf=True, yang_name="session-style", rest_name="session-style", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """session_style must be of a type compatible with session-reservation-style""",
'defined-type': "brocade-mpls-operational:session-reservation-style",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'reservation-style-shared-explicit': {'value': 2}, u'reservation-style-wildcard-filter': {'value': 0}, u'reservation-style-unknown': {'value': 3}, u'reservation-style-fixed-filter': {'value': 1}},), is_leaf=True, yang_name="session-style", rest_name="session-style", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False)""",
})
self.__session_style = t
if hasattr(self, '_set'):
self._set()
def _ostaunicode(src):
    # type: (str) -> bytes
    '''
    Internal function to create an OSTA byte string from a source string.
    '''
    if have_py_3:
        bytename = src
    else:
        bytename = src.decode('utf-8')  # type: ignore
    try:
        enc = bytename.encode('latin-1')
        encbyte = b'\x08'
    except (UnicodeEncodeError, UnicodeDecodeError):
        enc = bytename.encode('utf-16_be')
        encbyte = b'\x10'
    return encbyte + enc
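The helper tries Latin-1 first and falls back to UTF-16BE, prefixing the matching OSTA compression-ID byte. A quick standalone check of the Python 3 branch of that fallback:

def osta_bytes(name):
    try:
        return b'\x08' + name.encode('latin-1')   # 8-bit compression ID
    except UnicodeEncodeError:
        return b'\x10' + name.encode('utf-16_be')  # 16-bit compression ID

print(osta_bytes('BOOT.CAT'))   # b'\x08BOOT.CAT'
print(osta_bytes('データ'))      # starts with b'\x10', two bytes per character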
def check_layer(layer, has_geometry=True):
"""Helper to check layer validity.
This function will raise InvalidLayerError if the layer is invalid.
:param layer: The layer to check.
:type layer: QgsMapLayer
:param has_geometry: If the layer must have a geometry. True by default.
If it's a raster layer, we will not check this parameter. If we do not
want to check the geometry type, we can set it to None.
:type has_geometry: bool,None
:raise: InvalidLayerError
:return: Return True if the layer is valid.
:rtype: bool
"""
if is_vector_layer(layer) or is_raster_layer(layer):
if not layer.isValid():
raise InvalidLayerError(
'The layer is invalid : %s' % layer.publicSource())
if is_vector_layer(layer):
sub_layers = layer.dataProvider().subLayers()
if len(sub_layers) > 1:
names = ';'.join(sub_layers)
source = layer.source()
raise InvalidLayerError(
tr('The layer should not have many sublayers : {source} : '
'{names}').format(source=source, names=names))
# We only check the geometry if we have at least one feature.
if layer.geometryType() == QgsWkbTypes.UnknownGeometry and (
layer.featureCount() != 0):
raise InvalidLayerError(
tr('The layer has not a valid geometry type.'))
if layer.wkbType() == QgsWkbTypes.Unknown and (
layer.featureCount() != 0):
raise InvalidLayerError(
tr('The layer has not a valid geometry type.'))
if isinstance(has_geometry, bool) and layer.featureCount() != 0:
if layer.isSpatial() != has_geometry:
raise InvalidLayerError(
tr('The layer has not a correct geometry type.'))
else:
raise InvalidLayerError(
tr('The layer is neither a raster nor a vector : {type}').format(
type=type(layer)))
return True
def cli(env, columns, sortby, volume_id):
"""List ACLs."""
block_manager = SoftLayer.BlockStorageManager(env.client)
access_list = block_manager.get_block_volume_access_list(
volume_id=volume_id)
table = formatting.Table(columns.columns)
table.sortby = sortby
for key, type_name in [('allowedVirtualGuests', 'VIRTUAL'),
('allowedHardware', 'HARDWARE'),
('allowedSubnets', 'SUBNET'),
('allowedIpAddresses', 'IP')]:
for obj in access_list.get(key, []):
obj['type'] = type_name
table.add_row([value or formatting.blank()
for value in columns.row(obj)])
env.fout(table)
def _select_best_remaining_qubit(self, prog_qubit):
"""
Select the best remaining hardware qubit for the next program qubit.
"""
reliab_store = {}
for hw_qubit in self.available_hw_qubits:
reliab = 1
for n in self.prog_graph.neighbors(prog_qubit):
if n in self.prog2hw:
reliab *= self.swap_costs[self.prog2hw[n]][hw_qubit]
reliab *= self.readout_errors[hw_qubit]
reliab_store[hw_qubit] = reliab
max_reliab = 0
best_hw_qubit = None
for hw_qubit in reliab_store:
if reliab_store[hw_qubit] > max_reliab:
max_reliab = reliab_store[hw_qubit]
best_hw_qubit = hw_qubit
return best_hw_qubit | Select the best remaining hardware qubit for the next program qubit. |
def _fix_labels(self):
"""For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
"""
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue
mag, _ = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n
# If brightest is not tag _0, then switch them.
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf('{}_{}'.format(s,0))
n_other.tag = n0.tag
n0.tag = 0 | For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image |
def fetch(self, addon_id, data={}, **kwargs):
""""
Fetch addon for given Id
Args:
addon_id : Id for which addon object has to be retrieved
Returns:
addon dict for given subscription Id
"""
return super(Addon, self).fetch(addon_id, data, **kwargs) | Fetch addon for given Id
Args:
addon_id : Id for which addon object has to be retrieved
Returns:
addon dict for given subscription Id |
def MakeExecutableTemplate(self, output_file=None):
"""Windows templates also include the nanny."""
super(WindowsClientBuilder,
self).MakeExecutableTemplate(output_file=output_file)
self.MakeBuildDirectory()
self.BuildWithPyInstaller()
# Get any dll's that pyinstaller forgot:
for module in EnumMissingModules():
logging.info("Copying additional dll %s.", module)
shutil.copy(module, self.output_dir)
self.BuildNanny()
# Generate a prod and a debug version of nanny executable.
shutil.copy(
os.path.join(self.output_dir, "GRRservice.exe"),
os.path.join(self.output_dir, "dbg_GRRservice.exe"))
with open(os.path.join(self.output_dir, "GRRservice.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=False)
with open(os.path.join(self.output_dir, "dbg_GRRservice.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=True)
# Generate a prod and a debug version of client executable.
shutil.copy(
os.path.join(self.output_dir, "grr-client.exe"),
os.path.join(self.output_dir, "dbg_grr-client.exe"))
with open(os.path.join(self.output_dir, "grr-client.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=False)
with open(os.path.join(self.output_dir, "dbg_grr-client.exe"), "r+") as fd:
build.SetPeSubsystem(fd, console=True)
self.MakeZip(self.output_dir, self.template_file) | Windows templates also include the nanny. |
def plot_station_mapping(
target_latitude,
target_longitude,
isd_station,
distance_meters,
target_label="target",
): # pragma: no cover
""" Plots this mapping on a map."""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Plotting requires matplotlib.")
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
except ImportError:
raise ImportError("Plotting requires cartopy.")
lat, lng = isd_station.coords
t_lat, t_lng = float(target_latitude), float(target_longitude)
# figure
fig = plt.figure(figsize=(16, 8))
# axes
tiles = cimgt.StamenTerrain()
ax = plt.subplot(1, 1, 1, projection=tiles.crs)
# offsets for labels
x_max = max([lng, t_lng])
x_min = min([lng, t_lng])
x_diff = x_max - x_min
y_max = max([lat, t_lat])
y_min = min([lat, t_lat])
y_diff = y_max - y_min
xoffset = x_diff * 0.05
yoffset = y_diff * 0.05
# minimum
left = x_min - x_diff * 0.5
right = x_max + x_diff * 0.5
bottom = y_min - y_diff * 0.3
top = y_max + y_diff * 0.3
width_ratio = 2.
height_ratio = 1.
if (right - left) / (top - bottom) > width_ratio / height_ratio:
# too short
goal = (right - left) * height_ratio / width_ratio
diff = goal - (top - bottom)
bottom = bottom - diff / 2.
top = top + diff / 2.
else:
# too skinny
goal = (top - bottom) * width_ratio / height_ratio
diff = goal - (right - left)
left = left - diff / 2.
right = right + diff / 2.
ax.set_extent([left, right, bottom, top])
# determine zoom level
# tile size at level 1 = 64 km
# level 2 = 32 km, level 3 = 16 km, etc, i.e. 128/(2^n) km
N_TILES = 600 # (how many tiles approximately fit in distance)
km = distance_meters / 1000.0
zoom_level = int(np.log2(128 * N_TILES / km))
ax.add_image(tiles, zoom_level)
# line between
plt.plot(
[lng, t_lng],
[lat, t_lat],
linestyle="-",
dashes=[2, 2],
transform=ccrs.Geodetic(),
)
# station
ax.plot(lng, lat, "ko", markersize=7, transform=ccrs.Geodetic())
# target
ax.plot(t_lng, t_lat, "ro", markersize=7, transform=ccrs.Geodetic())
# station label
station_label = "{} ({})".format(isd_station.usaf_id, isd_station.name)
ax.text(lng + xoffset, lat + yoffset, station_label, transform=ccrs.Geodetic())
# target label
ax.text(t_lng + xoffset, t_lat + yoffset, target_label, transform=ccrs.Geodetic())
# distance labels
mid_lng = (lng + t_lng) / 2
mid_lat = (lat + t_lat) / 2
dist_text = "{:.01f} km".format(km)
ax.text(mid_lng + xoffset, mid_lat + yoffset, dist_text, transform=ccrs.Geodetic())
plt.show() | Plots this mapping on a map. |
def call_bad_cb(self, tb):
"""
If bad_cb returns True then keep it
:param tb: traceback that caused exception
:return:
"""
with LiveExecution.lock:
if self.bad_cb and not self.bad_cb(tb):
self.bad_cb = None | If bad_cb returns True then keep it
:param tb: traceback that caused exception
:return: |
def getExtensionArgs(self):
"""Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str}
"""
args = {}
if self.required:
args['required'] = ','.join(self.required)
if self.optional:
args['optional'] = ','.join(self.optional)
if self.policy_url:
args['policy_url'] = self.policy_url
return args | Get a dictionary of unqualified simple registration
arguments representing this request.
This method is essentially the inverse of
C{L{parseExtensionArgs}}. This method serializes the simple
registration request fields.
@rtype: {str:str} |
def process_index(self, url, page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts) == 2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" | Process the contents of a PyPI page |
def verify_exif(filename):
'''
Check that image file has the required EXIF fields.
Incompatible files will be ignored server side.
'''
# required tags in IFD name convention
required_exif = required_fields()
exif = ExifRead(filename)
required_exif_exist = exif.fields_exist(required_exif)
return required_exif_exist | Check that image file has the required EXIF fields.
Incompatible files will be ignored server side. |
def inspect_tables(conn, database_metadata):
" List tables and their row counts, excluding uninteresting tables. "
tables = {}
table_names = [
r["name"]
for r in conn.execute(
'select * from sqlite_master where type="table"'
)
]
for table in table_names:
table_metadata = database_metadata.get("tables", {}).get(
table, {}
)
try:
count = conn.execute(
"select count(*) from {}".format(escape_sqlite(table))
).fetchone()[0]
except sqlite3.OperationalError:
# This can happen when running against a FTS virtual table
# e.g. "select count(*) from some_fts;"
count = 0
column_names = table_columns(conn, table)
tables[table] = {
"name": table,
"columns": column_names,
"primary_keys": detect_primary_keys(conn, table),
"count": count,
"hidden": table_metadata.get("hidden") or False,
"fts_table": detect_fts(conn, table),
}
foreign_keys = get_all_foreign_keys(conn)
for table, info in foreign_keys.items():
tables[table]["foreign_keys"] = info
# Mark tables 'hidden' if they relate to FTS virtual tables
hidden_tables = [
r["name"]
for r in conn.execute(
"""
select name from sqlite_master
where rootpage = 0
and sql like '%VIRTUAL TABLE%USING FTS%'
"""
)
]
if detect_spatialite(conn):
# Also hide Spatialite internal tables
hidden_tables += [
"ElementaryGeometries",
"SpatialIndex",
"geometry_columns",
"spatial_ref_sys",
"spatialite_history",
"sql_statements_log",
"sqlite_sequence",
"views_geometry_columns",
"virts_geometry_columns",
] + [
r["name"]
for r in conn.execute(
"""
select name from sqlite_master
where name like "idx_%"
and type = "table"
"""
)
]
for t in tables.keys():
for hidden_table in hidden_tables:
if t == hidden_table or t.startswith(hidden_table):
tables[t]["hidden"] = True
continue
return tables | List tables and their row counts, excluding uninteresting tables. |
def schedule_job(self, job):
"""
schedule a job to the type of workers spawned by self.start_workers.
:param job: the job to schedule for running.
:return:
"""
l = _reraise_with_traceback(job.get_lambda_to_execute())
future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel)
# assign the futures to a dict, mapping them to a job
self.job_future_mapping[future] = job
self.future_job_mapping[job.job_id] = future
# callback for when the future is now!
future.add_done_callback(self.handle_finished_future)
# add the job to our cancel notifications data structure, with False at first
self.cancel_notifications[job.job_id] = False
return future | schedule a job to the type of workers spawned by self.start_workers.
:param job: the job to schedule for running.
:return: |
def assert_reset(self, asserted):
"""Assert or de-assert target reset line"""
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | Assert or de-assert target reset line |
def organization_users(self, id, permission_set=None, role=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/users#list-users"
api_path = "/api/v2/organizations/{id}/users.json"
api_path = api_path.format(id=id)
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if permission_set:
api_query.update({
"permission_set": permission_set,
})
if role:
api_query.update({
"role": role,
})
return self.call(api_path, query=api_query, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/users#list-users |
def _repr_categories_info(self):
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse the elided middle categories to ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]" | Returns a string representation of the footer. |
def av(self, data, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Time-average of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to compute the regional time-average of
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The region-averaged and time-averaged data.
"""
ts = self.ts(data, lon_str=lon_str, lat_str=lat_str,
land_mask_str=land_mask_str, sfc_area_str=sfc_area_str)
if YEAR_STR not in ts.coords:
return ts
else:
return ts.mean(YEAR_STR) | Time-average of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to compute the regional time-average of
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The region-averaged and time-averaged data. |
def convert(outputfile, inputfile, to_format, from_format):
"""
Convert pretrained word embedding file in one format to another.
"""
emb = word_embedding.WordEmbedding.load(
inputfile, format=_input_choices[from_format][1],
binary=_input_choices[from_format][2])
emb.save(outputfile, format=_output_choices[to_format][1],
binary=_output_choices[to_format][2]) | Convert pretrained word embedding file in one format to another. |
def liftover(args):
"""
%prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--checkvalid", default=False, action="store_true",
help="Check minscore, period and length")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
refbed, fastafile = args
genome = pyfasta.Fasta(fastafile)
edits = []
fp = open(refbed)
for i, row in enumerate(fp):
s = STRLine(row)
seq = genome[s.seqid][s.start - 1: s.end].upper()
s.motif = get_motif(seq, len(s.motif))
s.fix_counts(seq)
if opts.checkvalid and not s.is_valid():
continue
edits.append(s)
if i % 10000 == 0:
print(i, "lines read", file=sys.stderr)
edits = natsorted(edits, key=lambda x: (x.seqid, x.start))
for e in edits:
print(str(e)) | %prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers. |
def __safe_errback(self, room_data, err_condition, err_text):
"""
Safe use of the callback method, to avoid errors propagation
:param room_data: A RoomData object
:param err_condition: Category of error
:param err_text: Description of the error
"""
method = room_data.errback
if method is not None:
try:
method(room_data.room, room_data.nick, err_condition, err_text)
except Exception as ex:
self.__logger.exception("Error calling back room creator: %s",
ex) | Safe use of the callback method, to avoid errors propagation
:param room_data: A RoomData object
:param err_condition: Category of error
:param err_text: Description of the error |
def log_errors(f, self, *args, **kwargs):
"""decorator to log unhandled exceptions raised in a method.
For use wrapping on_recv callbacks, so that exceptions
do not cause the stream to be closed.
"""
try:
return f(self, *args, **kwargs)
except Exception:
self.log.error("Uncaught exception in %r" % f, exc_info=True) | decorator to log unhandled exceptions raised in a method.
For use wrapping on_recv callbacks, so that exceptions
do not cause the stream to be closed. |
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._user_listen_key:
self._stop_user_socket() | Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise |
def readlines(self):
"""Returns a list of all lines (optionally parsed) in the file."""
if self.grammar:
tot = []
# Used this way instead of a 'for' loop against
# self.file.readlines() so that there weren't two copies of the file
# in memory.
while 1:
line = self.file.readline()
if not line:
break
tot.append(line)
return tot
return self.file.readlines() | Returns a list of all lines (optionally parsed) in the file. |
def is_nameserver(self, path):
'''Is the node pointed to by @ref path a name server (specialisation
of directory nodes)?
'''
node = self.get_node(path)
if not node:
return False
return node.is_nameserver | Is the node pointed to by @ref path a name server (specialisation
of directory nodes)? |
def get_fermi_interextrapolated(self, c, T, warn=True, c_ref=1e10, **kwargs):
"""
Similar to get_fermi except that when get_fermi fails to converge,
an interpolated or extrapolated fermi (depending on c) is returned with
the assumption that the fermi level changes linearly with log(abs(c)).
Args:
c (float): doping concentration in 1/cm3. c<0 represents n-type
doping and c>0 p-type doping (i.e. majority carriers are holes)
T (float): absolute temperature in Kelvin
warn (bool): whether to warn for the first time when no fermi can
be found.
c_ref (float): a doping concentration where get_fermi returns a
value without error for both c_ref and -c_ref
**kwargs: see keyword arguments of the get_fermi function
Returns (float): the fermi level that is possibly interpolated or
extrapolated and must be used with caution.
"""
try:
return self.get_fermi(c, T, **kwargs)
except ValueError as e:
if warn:
warnings.warn(str(e))
if abs(c) < c_ref:
if abs(c) < 1e-10:
c = 1e-10
# max(10, ) is to avoid log(0<x<1) and log(1+x) both of which are slow
f2 = self.get_fermi_interextrapolated(max(10, abs(c) * 10.), T, warn=False, **kwargs)
f1 = self.get_fermi_interextrapolated(-max(10, abs(c) * 10.), T, warn=False, **kwargs)
c2 = np.log(abs(1 + self.get_doping(f2, T)))
c1 = -np.log(abs(1 + self.get_doping(f1, T)))
slope = (f2 - f1) / (c2 - c1)
return f2 + slope * (np.sign(c) * np.log(abs(1 + c)) - c2)
else:
f_ref = self.get_fermi_interextrapolated(np.sign(c) * c_ref, T, warn=False, **kwargs)
f_new = self.get_fermi_interextrapolated(c / 10., T, warn=False, **kwargs)
clog = np.sign(c) * np.log(abs(c))
c_newlog = np.sign(c) * np.log(abs(self.get_doping(f_new, T)))
slope = (f_new - f_ref) / (c_newlog - np.sign(c) * 10.)
return f_new + slope * (clog - c_newlog) | Similar to get_fermi except that when get_fermi fails to converge,
an interpolated or extrapolated fermi (depending on c) is returned with
the assumption that the fermi level changes linearly with log(abs(c)).
Args:
c (float): doping concentration in 1/cm3. c<0 represents n-type
doping and c>0 p-type doping (i.e. majority carriers are holes)
T (float): absolute temperature in Kelvin
warn (bool): whether to warn for the first time when no fermi can
be found.
c_ref (float): a doping concentration where get_fermi returns a
value without error for both c_ref and -c_ref
**kwargs: see keyword arguments of the get_fermi function
Returns (float): the fermi level that is possibly interpolated or
extrapolated and must be used with caution. |
def compile(self, X, verbose=False):
"""method to validate and prepare data-dependent parameters
Parameters
---------
X : array-like
Input dataset
verbose : bool
whether to show warnings
Returns
-------
None
"""
if self.feature >= X.shape[1]:
raise ValueError('term requires feature {}, '\
'but X has only {} dimensions'\
.format(self.feature, X.shape[1]))
if self.by is not None and self.by >= X.shape[1]:
raise ValueError('by variable requires feature {}, '\
'but X has only {} dimensions'\
.format(self.by, X.shape[1]))
if not hasattr(self, 'edge_knots_'):
self.edge_knots_ = gen_edge_knots(X[:, self.feature],
self.dtype,
verbose=verbose)
return self | method to validate and prepare data-dependent parameters
Parameters
---------
X : array-like
Input dataset
verbose : bool
whether to show warnings
Returns
-------
None |
def apply_relationships(self, data, obj):
"""Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
:return boolean: True if relationship have changed else False
"""
relationships_to_apply = []
relationship_fields = get_relationships(self.resource.schema, model_field=True)
for key, value in data.items():
if key in relationship_fields:
related_model = getattr(obj.__class__, key).property.mapper.class_
schema_field = get_schema_field(self.resource.schema, key)
related_id_field = self.resource.schema._declared_fields[schema_field].id_field
if isinstance(value, list):
related_objects = []
for identifier in value:
related_object = self.get_related_object(related_model, related_id_field, {'id': identifier})
related_objects.append(related_object)
relationships_to_apply.append({'field': key, 'value': related_objects})
else:
related_object = None
if value is not None:
related_object = self.get_related_object(related_model, related_id_field, {'id': value})
relationships_to_apply.append({'field': key, 'value': related_object})
for relationship in relationships_to_apply:
setattr(obj, relationship['field'], relationship['value']) | Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
:return boolean: True if relationship have changed else False |
def _parse_caps_bank(bank):
'''
Parse the <bank> element of the connection capabilities XML.
'''
result = {
'id': int(bank.get('id')),
'level': int(bank.get('level')),
'type': bank.get('type'),
'size': "{} {}".format(bank.get('size'), bank.get('unit')),
'cpus': bank.get('cpus')
}
controls = []
for control in bank.findall('control'):
unit = control.get('unit')
result_control = {
'granularity': "{} {}".format(control.get('granularity'), unit),
'type': control.get('type'),
'maxAllocs': int(control.get('maxAllocs'))
}
minimum = control.get('min')
if minimum:
result_control['min'] = "{} {}".format(minimum, unit)
controls.append(result_control)
if controls:
result['controls'] = controls
return result | Parse the <bank> element of the connection capabilities XML. |
def water(target, temperature='pore.temperature', salinity='pore.salinity'):
r"""
Calculates vapor pressure of pure water or seawater given by [1] based on
Raoult's law. The pure water vapor pressure is given by [2]
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
temperature : string
The dictionary key containing the phase temperature values
salinity : string
The dictionary key containing the phase salinity values
Returns
-------
The vapor pressure of water/seawater in [Pa]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg;
ACCURACY: 0.5 %
References
----------
[1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and
Water Treatment, 2010.
[2] ASHRAE handbook: Fundamentals, ASHRAE; 2005.
"""
T = target[temperature]
if salinity in target.keys():
S = target[salinity]
else:
S = 0
a1 = -5.8002206E+03
a2 = 1.3914993E+00
a3 = -4.8640239E-02
a4 = 4.1764768E-05
a5 = -1.4452093E-08
a6 = 6.5459673E+00
Pv_w = np.exp((a1/T) + a2 + a3*T + a4*T**2 + a5*T**3 + a6*np.log(T))
Pv_sw = Pv_w/(1+0.57357*(S/(1000-S)))
value = Pv_sw
return value | r"""
Calculates vapor pressure of pure water or seawater given by [1] based on
Raoult's law. The pure water vapor pressure is given by [2]
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
temperature : string
The dictionary key containing the phase temperature values
salinity : string
The dictionary key containing the phase salinity values
Returns
-------
The vapor pressure of water/seawater in [Pa]
Notes
-----
T must be in K, and S in g of salt per kg of phase, or ppt (parts per
thousand)
VALIDITY: 273 < T < 473 K; 0 < S < 240 g/kg;
ACCURACY: 0.5 %
References
----------
[1] Sharqawy M. H., Lienhard J. H., and Zubair, S. M., Desalination and
Water Treatment, 2010.
[2] ASHRAE handbook: Fundamentals, ASHRAE; 2005. |
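A minimal standalone sketch of the same correlation outside OpenPNM, assuming T in kelvin and S in g/kg; the helper name and example values are illustrative, not from the source:
import numpy as np

def seawater_vapor_pressure(T, S=0.0):
    # pure-water vapor pressure [2], then the salinity correction from [1]
    a1, a2, a3 = -5.8002206E+03, 1.3914993E+00, -4.8640239E-02
    a4, a5, a6 = 4.1764768E-05, -1.4452093E-08, 6.5459673E+00
    Pv_w = np.exp(a1/T + a2 + a3*T + a4*T**2 + a5*T**3 + a6*np.log(T))
    return Pv_w / (1 + 0.57357*(S/(1000 - S)))

# e.g. seawater_vapor_pressure(298.15, S=35.0) for standard seawater at 25 degrees C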
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
"""Recursive function that sorts protein group by a number of sorting
functions."""
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out | Recursive function that sorts protein group by a number of sorting
functions. |
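A toy illustration of the recursion, with integers standing in for protein entries and two hypothetical sorting functions (each takes a group and returns ordered subgroups):
by_parity = lambda group: [[x for x in group if x % 2 == 0],
                           [x for x in group if x % 2 == 1]]
by_value = lambda group: [[x] for x in sorted(group)]
sort_protein_group([5, 2, 8, 3], [by_parity, by_value], 0)
# -> [2, 8, 3, 5]: even entries first, each subgroup then ordered by value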
def get_kgXref_hg19(self):
""" Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None
"""
if self._kgXref_hg19 is None:
self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19())
return self._kgXref_hg19 | Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None |
def ReadCronJobs(self, cronjob_ids=None, cursor=None):
"""Reads all cronjobs from the database."""
query = ("SELECT job, UNIX_TIMESTAMP(create_time), enabled, "
"forced_run_requested, last_run_status, "
"UNIX_TIMESTAMP(last_run_time), current_run_id, state, "
"UNIX_TIMESTAMP(leased_until), leased_by "
"FROM cron_jobs")
if cronjob_ids is None:
cursor.execute(query)
return [self._CronJobFromRow(row) for row in cursor.fetchall()]
query += " WHERE job_id IN (%s)" % ", ".join(["%s"] * len(cronjob_ids))
cursor.execute(query, cronjob_ids)
res = []
for row in cursor.fetchall():
res.append(self._CronJobFromRow(row))
if len(res) != len(cronjob_ids):
missing = set(cronjob_ids) - set([c.cron_job_id for c in res])
raise db.UnknownCronJobError("CronJob(s) with id(s) %s not found." %
missing)
return res | Reads all cronjobs from the database. |
def overwrite_file_check(args, filename):
"""If filename exists, overwrite or modify it to be unique."""
if not args['overwrite'] and os.path.exists(filename):
# Confirm overwriting of the file, or modify filename
if args['no_overwrite']:
overwrite = False
else:
try:
overwrite = confirm_input(input('Overwrite {0}? (yes/no): '
.format(filename)))
except (KeyboardInterrupt, EOFError):
sys.exit()
if not overwrite:
new_filename = modify_filename_id(filename)
while os.path.exists(new_filename):
new_filename = modify_filename_id(new_filename)
return new_filename
return filename | If filename exists, overwrite or modify it to be unique. |
def calculate_batch_normalization_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C] ---> [N, C]
2. [N, C, H, W] ---> [N, C, H, W]
This operator just uses the operator input shape as its output shape.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
input_shape = operator.inputs[0].type.shape
if len(input_shape) not in [2, 4]:
raise RuntimeError('Input must be a 2-D or a 4-D tensor')
operator.outputs[0].type.shape = copy.deepcopy(operator.inputs[0].type.shape) | Allowed input/output patterns are
1. [N, C] ---> [N, C]
2. [N, C, H, W] ---> [N, C, H, W]
This operator just uses the operator input shape as its output shape. |
def _set_labels(self, catalogue):
"""Returns a dictionary of the unique labels in `catalogue` and the
count of all tokens associated with each, and sets the record
of each Text to its corresponding label.
Texts that do not have a label specified are set to the empty
string.
Token counts are included in the results to allow for
semi-accurate sorting based on corpora size.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `dict`
"""
with self._conn:
self._conn.execute(constants.UPDATE_LABELS_SQL, [''])
labels = {}
for work, label in catalogue.items():
self._conn.execute(constants.UPDATE_LABEL_SQL, [label, work])
cursor = self._conn.execute(
constants.SELECT_TEXT_TOKEN_COUNT_SQL, [work])
token_count = cursor.fetchone()['token_count']
labels[label] = labels.get(label, 0) + token_count
return labels | Returns a dictionary of the unique labels in `catalogue` and the
count of all tokens associated with each, and sets the record
of each Text to its corresponding label.
Texts that do not have a label specified are set to the empty
string.
Token counts are included in the results to allow for
semi-accurate sorting based on corpora size.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `dict` |
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a dict containing the IPFS hash, file name, and size of a file.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data | Return a dict containing the IPFS hash, file name, and size of a file. |
def register_component(self, path):
"""
Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
component = foundations.strings.get_splitext_basename(path)
LOGGER.debug("> Current Component: '{0}'.".format(component))
profile = Profile(file=path)
if profile.initializeProfile():
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)) or \
os.path.basename(profile.directory) == profile.package:
self.__components[profile.name] = profile
return True
else:
raise manager.exceptions.ComponentModuleError(
"{0} | '{1}' has no associated module and has been rejected!".format(self.__class__.__name__,
component))
else:
raise manager.exceptions.ComponentProfileError(
"{0} | '{1}' is not a valid Component and has been rejected!".format(self.__class__.__name__,
component)) | Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool |
def get_features(cls, entry):
"""
get list of `models.Feature` from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.Feature`
"""
features = []
for feature in entry.iterfind("./feature"):
feature_dict = {
'description': feature.attrib.get('description'),
'type_': feature.attrib['type'],
'identifier': feature.attrib.get('id')
}
features.append(models.Feature(**feature_dict))
return features | get list of `models.Feature` from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.Feature` |
def get_sort_limit():
""" returns the 'sort_limit' from the request
"""
limit = _.convert(get("sort_limit"), _.to_int)
if (limit < 1):
limit = None # catalog raises IndexError if limit < 1
return limit | returns the 'sort_limit' from the request |
def _match_iter_generic(self, path_elements, start_at):
"""Implementation of match_iter for >1 self.elements"""
length = len(path_elements)
# If bound to start, we stop searching at the first element
if self.bound_start:
end = 1
else:
end = length - self.length + 1
# If bound to end, we start searching as late as possible
if self.bound_end:
start = length - self.length
else:
start = start_at
if start > end or start < start_at or end > length - self.length + 1:
# It's impossible to match. Either
# 1) the search has a fixed start and end, and path_elements
# does not have enough elements for a match, or
# 2) To match the bound_end, we have to start before the start_at,
# which means the search is impossible
# 3) The end is after the last possible end point in path_elements
return
for index in range(start, end):
matched = True
i = index
for matcher in self.elements:
element = path_elements[i]
i += 1
if not matcher.match(element):
matched = False
break
if matched:
yield index + self.length | Implementation of match_iter for >1 self.elements |
def point_plane_distance(points,
plane_normal,
plane_origin=[0.0, 0.0, 0.0]):
"""
The minimum perpendicular distance of a point to a plane.
Parameters
-----------
points: (n, 3) float, points in space
plane_normal: (3,) float, normal vector
plane_origin: (3,) float, plane origin in space
Returns
------------
distances: (n,) float, distance from point to plane
"""
points = np.asanyarray(points, dtype=np.float64)
w = points - plane_origin
distances = np.dot(plane_normal, w.T) / np.linalg.norm(plane_normal)
return distances | The minimum perpendicular distance of a point to a plane.
Parameters
-----------
points: (n, 3) float, points in space
plane_normal: (3,) float, normal vector
plane_origin: (3,) float, plane origin in space
Returns
------------
distances: (n,) float, distance from point to plane |
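A small usage sketch with made-up coordinates; the sign of each distance indicates which side of the plane the point lies on:
import numpy as np
pts = np.array([[0.0, 0.0, 1.0],
                [0.0, 0.0, -2.0],
                [3.0, 4.0, 0.5]])
# distances to the z = 0 plane (normal along +z, origin at the world origin)
point_plane_distance(pts, plane_normal=[0.0, 0.0, 1.0])
# -> approximately [ 1. , -2. ,  0.5]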
def add_edge(self, vertex1, vertex2, multicolor, merge=True, data=None):
""" Creates a new :class:`bg.edge.BGEdge` object from supplied information and adds it to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param multicolor: an information about multi-colors of added edge
:type multicolor: :class:`bg.multicolor.Multicolor`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__add_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor, data=data), merge=merge) | Creates a new :class:`bg.edge.BGEdge` object from supplied information and adds it to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param multicolor: an information about multi-colors of added edge
:type multicolor: :class:`bg.multicolor.Multicolor`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes |
def _get_contours(self):
"""
Returns a list of contours in the path, as BezierPath objects.
A contour is a sequence of lines and curves separated from the next contour by a MOVETO.
For example, the glyph "o" has two contours: the inner circle and the outer circle.
"""
# Originally from nodebox-gl
contours = []
current_contour = None
empty = True
for i, el in enumerate(self._get_elements()):
if el.cmd == MOVETO:
if not empty:
contours.append(current_contour)
current_contour = BezierPath(self._bot)
current_contour.moveto(el.x, el.y)
empty = True
elif el.cmd == LINETO:
empty = False
current_contour.lineto(el.x, el.y)
elif el.cmd == CURVETO:
empty = False
current_contour.curveto(el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y)
elif el.cmd == CLOSE:
current_contour.closepath()
if not empty:
contours.append(current_contour)
return contours | Returns a list of contours in the path, as BezierPath objects.
A contour is a sequence of lines and curves separated from the next contour by a MOVETO.
For example, the glyph "o" has two contours: the inner circle and the outer circle. |
def keys(self, element=None, mode=None):
r"""
This subclass works exactly like ``keys`` when no arguments are passed,
but optionally accepts an ``element`` and/or a ``mode``, which filters
the output to only the requested keys.
The default behavior is exactly equivalent to the normal ``keys``
method.
Parameters
----------
element : string
Can be either 'pore' or 'throat', which limits the returned list of
keys to only 'pore' or 'throat' keys. If neither is given, then
both are assumed.
mode : string (optional, default is 'skip')
Controls which keys are returned. Options are:
**``None``** : This mode (default) bypasses this subclassed method
and just returns the normal KeysView object.
**'labels'** : Limits the returned list of keys to only 'labels'
(boolean arrays)
**'props'** : Limits the returned list of keys to only 'props'
(numerical arrays).
**'all'** : Returns both 'labels' and 'props'. This is equivalent
to sending a list of both 'labels' and 'props'.
See Also
--------
props
labels
Notes
-----
This subclass can be used to get dictionary keys of specific kinds of
data. It's use augments ``props`` and ``labels`` by returning a list
containing both types, but possibly limited by element type ('pores'
or 'throats'.)
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic([5, 5, 5])
>>> pn.keys(mode='props') # Get all props
['pore.coords', 'throat.conns']
>>> pn.keys(mode='props', element='pore') # Get only pore props
['pore.coords']
"""
if mode is None:
return super().keys()
element = self._parse_element(element=element)
allowed = ['props', 'labels']
if 'all' in mode:
mode = allowed
mode = self._parse_mode(mode=mode, allowed=allowed)
keys = super().keys()
temp = []
if 'props' in mode:
temp.extend([i for i in keys if self.get(i).dtype != bool])
if 'labels' in mode:
temp.extend([i for i in keys if self.get(i).dtype == bool])
if element:
temp = [i for i in temp if i.split('.')[0] in element]
return temp | r"""
This subclass works exactly like ``keys`` when no arguments are passed,
but optionally accepts an ``element`` and/or a ``mode``, which filters
the output to only the requested keys.
The default behavior is exactly equivalent to the normal ``keys``
method.
Parameters
----------
element : string
Can be either 'pore' or 'throat', which limits the returned list of
keys to only 'pore' or 'throat' keys. If neither is given, then
both are assumed.
mode : string (optional, default is 'skip')
Controls which keys are returned. Options are:
**``None``** : This mode (default) bypasses this subclassed method
and just returns the normal KeysView object.
**'labels'** : Limits the returned list of keys to only 'labels'
(boolean arrays)
**'props'** : Limits the returned list of keys to only 'props'
(numerical arrays).
**'all'** : Returns both 'labels' and 'props'. This is equivalent
to sending a list of both 'labels' and 'props'.
See Also
--------
props
labels
Notes
-----
This subclass can be used to get dictionary keys of specific kinds of
data. Its use augments ``props`` and ``labels`` by returning a list
containing both types, but possibly limited by element type ('pores'
or 'throats'.)
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic([5, 5, 5])
>>> pn.keys(mode='props') # Get all props
['pore.coords', 'throat.conns']
>>> pn.keys(mode='props', element='pore') # Get only pore props
['pore.coords'] |
def mail_message(smtp_server, message, from_address, rcpt_addresses):
"""
Send mail using smtp.
"""
if smtp_server[0] == '/':
# Sending the message with local sendmail
p = os.popen(smtp_server, 'w')
p.write(message)
p.close()
else:
# Sending the message using a smtp server
import smtplib
server = smtplib.SMTP(smtp_server)
server.sendmail(from_address, rcpt_addresses, message)
server.quit() | Send mail using smtp. |
def pre(*content, sep='\n'):
"""
Make mono-width text block (Markdown)
:param content:
:param sep:
:return:
"""
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[3]) | Make mono-width text block (Markdown)
:param content:
:param sep:
:return: |
def lookup(cls, backend, obj):
"""
Given an object, lookup the corresponding customized option
tree if a single custom tree is applicable.
"""
ids = set([el for el in obj.traverse(lambda x: x.id) if el is not None])
if len(ids) == 0:
raise Exception("Object does not own a custom options tree")
elif len(ids) != 1:
idlist = ",".join([str(el) for el in sorted(ids)])
raise Exception("Object contains elements combined across "
"multiple custom trees (ids %s)" % idlist)
return cls._custom_options[backend][list(ids)[0]] | Given an object, lookup the corresponding customized option
tree if a single custom tree is applicable. |
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
"""Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one
"""
import matplotlib
# specify 'agg' renderer, Mac renderer does not support what we want to do below
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
# we can only do polygons right now
if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
raise ValueError("Cannot handle feature of type " + feature.geometry.type)
# fictional dpi - doesn't matter in the end
dpi = 100
# -- start documentation include: poly-setup
# make a new figure with no frame, no axes, with the correct size, black background
fig = plt.figure(frameon=False, dpi=dpi, )
fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
# noinspection PyTypeChecker
ax.set_xlim([0, shape[1]])
# noinspection PyTypeChecker
ax.set_ylim([0, shape[0]])
fig.add_axes(ax)
# -- end documentation include: poly-setup
# for normal polygons make coordinates iterable
if feature.geometry.type == 'Polygon':
coords = [feature.geometry.coordinates]
else:
coords = feature.geometry.coordinates
for poly_coords in coords:
# the polygon may contain multiple outlines; the first is
# always the outer one, the others are 'holes'
for i, outline in enumerate(poly_coords):
# inside/outside fill value: figure background is white by
# default, draw inverted polygon and invert again later
value = 0. if i == 0 else 1.
# convert lats/lons to row/column indices in the array
outline = np.array(outline)
xs = lon_idx(outline[:, 0])
ys = lat_idx(outline[:, 1])
# draw the polygon
poly = patches.Polygon(list(zip(xs, ys)),
facecolor=(value, value, value),
edgecolor='none',
antialiased=True)
ax.add_patch(poly)
# -- start documentation include: poly-extract
# extract the figure to a numpy array,
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# reshape to a proper numpy array, keep one channel only
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
# -- end documentation include: poly-extract
# make sure we get the right shape back
assert data.shape[0] == shape[0]
assert data.shape[1] == shape[1]
# convert from uints back to floats and invert to get black background
data = 1. - data.astype(float) / 255. # type: np.array
# image is flipped horizontally w.r.t. map
data = data[::-1, :]
# done, clean up
plt.close('all')
return data | Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func): function converting a longitude to the (fractional) column index in the map
Returns:
np.array: mask, background is zero, foreground is one |
def mark_error(self, dispatch, error_log, message_cls):
"""Marks a dispatch as having error or consequently as failed
if send retry limit for that message type is exhausted.
Should be used within send().
:param Dispatch dispatch: a Dispatch
:param str error_log: error message
:param MessageBase message_cls: MessageBase heir
"""
if message_cls.send_retry_limit is not None and (dispatch.retry_count + 1) >= message_cls.send_retry_limit:
self.mark_failed(dispatch, error_log)
else:
dispatch.error_log = error_log
self._st['error'].append(dispatch) | Marks a dispatch as having an error or, consequently, as failed
if the send retry limit for that message type is exhausted.
Should be used within send().
:param Dispatch dispatch: a Dispatch
:param str error_log: error message
:param MessageBase message_cls: MessageBase heir |
def lookup(values, name=None):
"""
Creates the grammar for a Lookup (L) field, accepting only values from a
list.
Like in the Alphanumeric field, the result will be stripped of all heading
and trailing whitespaces.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field
"""
if name is None:
name = 'Lookup Field'
if values is None:
raise ValueError('The values cannot be None')
# TODO: This should not be needed, it is just a patch. Fix this.
try:
v = values.asList()
values = v
except AttributeError:
values = values
# Only the specified values are allowed
lookup_field = pp.oneOf(values)
lookup_field.setName(name)
lookup_field.setParseAction(lambda s: s[0].strip())
lookup_field.leaveWhitespace()
return lookup_field | Creates the grammar for a Lookup (L) field, accepting only values from a
list.
Like in the Alphanumeric field, the result will be stripped of all heading
and trailing whitespaces.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field |
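A brief usage sketch, assuming pyparsing is imported as pp as in the function body; the allowed values here are hypothetical:
transaction_type = lookup(['NWR', 'REV', 'ISW'], name='Transaction Type')
transaction_type.parseString('REV')[0]   # -> 'REV'
# any token outside the list raises pp.ParseException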
def _call(self, x, out=None):
"""Multiply ``x`` and write to ``out`` if given."""
if out is None:
return x * self.multiplicand
elif not self.__range_is_field:
if self.__domain_is_field:
out.lincomb(x, self.multiplicand)
else:
out.assign(self.multiplicand * x)
else:
raise ValueError('can only use `out` with `LinearSpace` range') | Multiply ``x`` and write to ``out`` if given. |
def u2ver(self):
"""
Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float
"""
try:
part = urllib2.__version__.split('.', 1)
return float('.'.join(part))
except Exception, e:
log.exception(e)
return 0 | Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float |
def callback(msg, _):
"""Callback function called by libnl upon receiving messages from the kernel.
Positional arguments:
msg -- nl_msg class instance containing the data sent by the kernel.
Returns:
An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message.
"""
# First convert `msg` into something more manageable.
nlh = nlmsg_hdr(msg)
iface = ifinfomsg(nlmsg_data(nlh))
hdr = IFLA_RTA(iface)
remaining = ctypes.c_int(nlh.nlmsg_len - NLMSG_LENGTH(iface.SIZEOF))
# Now iterate through each rtattr stored in `iface`.
while RTA_OK(hdr, remaining):
# Each rtattr (which is what hdr is) instance is only one type. Looping through all of them until we run into
# the ones we care about.
if hdr.rta_type == IFLA_IFNAME:
print('Found network interface {0}: {1}'.format(iface.ifi_index, get_string(RTA_DATA(hdr)).decode('ascii')))
hdr = RTA_NEXT(hdr, remaining)
return NL_OK | Callback function called by libnl upon receiving messages from the kernel.
Positional arguments:
msg -- nl_msg class instance containing the data sent by the kernel.
Returns:
An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message. |
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates | _find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates |
def _handle_result(self, result):
"""Mark the result as completed, insert the `CompiledResultNode` into
the manifest, and mark any descendants (potentially with a 'cause' if
the result was an ephemeral model) as skipped.
"""
is_ephemeral = result.node.is_ephemeral_model
if not is_ephemeral:
self.node_results.append(result)
node = CompileResultNode(**result.node)
node_id = node.unique_id
self.manifest.nodes[node_id] = node
if result.error is not None:
if is_ephemeral:
cause = result
else:
cause = None
self._mark_dependent_errors(node_id, result, cause) | Mark the result as completed, insert the `CompileResultNode` into
the manifest, and mark any descendants (potentially with a 'cause' if
the result was an ephemeral model) as skipped. |
def fill(self, color, start=0, end=-1):
"""Fill the entire strip with RGB color tuple"""
start = max(start, 0)
if end < 0 or end >= self.numLEDs:
end = self.numLEDs - 1
for led in range(start, end + 1):  # +1 so the end index itself is included
self._set_base(led, color) | Fill the entire strip with RGB color tuple |
def set_widgets(self):
"""Set widgets on the Source tab."""
# Just set values based on existing keywords
source = self.parent.get_existing_keyword('source')
if source or source == 0:
self.leSource.setText(source)
else:
self.leSource.clear()
source_scale = self.parent.get_existing_keyword('scale')
if source_scale or source_scale == 0:
self.leSource_scale.setText(source_scale)
else:
self.leSource_scale.clear()
source_date = self.parent.get_existing_keyword('date')
if source_date:
self.ckbSource_date.setChecked(True)
self.dtSource_date.setDateTime(source_date)
else:
self.ckbSource_date.setChecked(False)
self.dtSource_date.clear()
source_url = self.parent.get_existing_keyword('url')
try:
source_url = source_url.toString()
except AttributeError:
pass
if source_url or source_url == 0:
self.leSource_url.setText(source_url)
else:
self.leSource_url.clear()
source_license = self.parent.get_existing_keyword('license')
if source_license or source_license == 0:
self.leSource_license.setText(source_license)
else:
self.leSource_license.clear() | Set widgets on the Source tab. |
def find_files(self):
"""
Find all file paths for publishing, yield (urlname, kwargs)
"""
# yield blueprint paths first
if getattr(self, 'blueprint_name', None):
for path in walk_directory(os.path.join(self.path, self.blueprint_name), ignore=self.project.EXCLUDES):
yield 'preview', {'path': path}
# then yield project paths
for path in walk_directory(self.path, ignore=self.project.EXCLUDES):
yield 'preview', {'path': path} | Find all file paths for publishing, yield (urlname, kwargs) |
def get_value(self, spreadsheet_id: str, range_name: str) -> dict:
"""
get value by range
:param spreadsheet_id:
:param range_name:
:return:
"""
service = self.__get_service()
result = service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_name).execute()
return result.get('values', []) | get value by range
:param spreadsheet_id:
:param range_name:
:return: |
def get_hmac(password):
"""Returns a Base64 encoded HMAC+SHA512 of the password signed with
the salt specified by ``SECURITY_PASSWORD_SALT``.
:param password: The password to sign
"""
salt = _security.password_salt
if salt is None:
raise RuntimeError(
'The configuration value `SECURITY_PASSWORD_SALT` must '
'not be None when the value of `SECURITY_PASSWORD_HASH` is '
'set to "%s"' % _security.password_hash)
h = hmac.new(encode_string(salt), encode_string(password), hashlib.sha512)
return base64.b64encode(h.digest()) | Returns a Base64 encoded HMAC+SHA512 of the password signed with
the salt specified by ``SECURITY_PASSWORD_SALT``.
:param password: The password to sign |
def preprocess(string):
"""
Preprocess string to transform all diacritics and remove special characters other than the appropriate ones
:param string:
:return:
"""
string = unicode(string, encoding="utf-8")
return regex.sub('', string).encode('utf-8') | Preprocess string to transform all diacritics and remove special characters other than the appropriate ones
:param string:
:return: |
def most_read_creators_card(num=10):
"""
Displays a card showing the Creators who have the most Readings
associated with their Publications.
In spectator_core tags, rather than spectator_reading so it can still be
used on core pages, even if spectator_reading isn't installed.
"""
if spectator_apps.is_enabled('reading'):
object_list = most_read_creators(num=num)
object_list = chartify(object_list, 'num_readings', cutoff=1)
return {
'card_title': 'Most read authors',
'score_attr': 'num_readings',
'object_list': object_list,
} | Displays a card showing the Creators who have the most Readings
associated with their Publications.
In spectator_core tags, rather than spectator_reading so it can still be
used on core pages, even if spectator_reading isn't installed. |
def to_datetime(date_or_datetime):
"""
Convert a date object to a datetime object,
or return as it is if it's not a date object.
:param date_or_datetime: date or datetime object
:return: a datetime object
"""
if isinstance(date_or_datetime, date) and \
not isinstance(date_or_datetime, datetime):
d = date_or_datetime
return datetime.strptime(
'%04d-%02d-%02d' % (d.year, d.month, d.day), '%Y-%m-%d')
return date_or_datetime | Convert a date object to a datetime object,
or return as it is if it's not a date object.
:param date_or_datetime: date or datetime object
:return: a datetime object |
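A quick usage sketch showing the three cases handled above; the example values are illustrative:
from datetime import date, datetime

to_datetime(date(2021, 3, 14))              # -> datetime(2021, 3, 14, 0, 0)
to_datetime(datetime(2021, 3, 14, 9, 26))   # datetime objects pass through unchanged
to_datetime('2021-03-14')                   # non-date values also pass through unchanged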
def profiler(sorting=('tottime',), stripDirs=True,
limit=20, path='', autoclean=True):
"""
Creates a profile wrapper around a method to time
all the operations that it runs through. For more
information, look into the hotshot Profile documentation
online for the built-in Python package.
:param sorting <tuple> ( <key>, .. )
:param stripDirs <bool>
:param limit <int>
:param path <str>
:param autoclean <bool>
:usage |from projex.decorators import profiler
|
|class A:
| @profiler() # must be called as a method
| def increment(amount, count = 1):
| return amount + count
|
|a = A()
|a.increment(10)
|
"""
def decorated(func):
""" Wrapper function to handle the profiling options. """
# create a call to the wrapping
@wraps(func)
def wrapped(*args, **kwds):
""" Inner method for calling the profiler method. """
# define the profile name
filename = os.path.join(path, '%s.prof' % func.__name__)
# create a profiler for the method to run through
prof = hotshot.Profile(filename)
results = prof.runcall(func, *args, **kwds)
prof.close()
# log the information about it
stats = hotshot.stats.load(filename)
if stripDirs:
stats.strip_dirs()
# we don't want to know about the arguments for this method
stats.sort_stats(*sorting)
stats.print_stats(limit)
# remove the file if desired
if autoclean:
os.remove(filename)
return results
return wrapped
return decorated | Creates a profile wrapper around a method to time
all the operations that it runs through. For more
information, look into the hotshot Profile documentation
online for the built-in Python package.
:param sorting <tuple> ( <key>, .. )
:param stripDirs <bool>
:param limit <int>
:param path <str>
:param autoclean <bool>
:usage |from projex.decorators import profiler
|
|class A:
| @profiler() # must be called as a method
| def increment(amount, count = 1):
| return amount + count
|
|a = A()
|a.increment(10)
| |
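hotshot only ships with Python 2. A rough Python 3 equivalent of the same decorator idea, built on the standard cProfile/pstats modules (a sketch mirroring the original parameters, not the author's code):
import cProfile
import pstats
from functools import wraps

def profiler_py3(sorting=('tottime',), stripDirs=True, limit=20):
    def decorated(func):
        @wraps(func)
        def wrapped(*args, **kwds):
            prof = cProfile.Profile()
            result = prof.runcall(func, *args, **kwds)   # profile the wrapped call
            stats = pstats.Stats(prof)
            if stripDirs:
                stats.strip_dirs()
            stats.sort_stats(*sorting)
            stats.print_stats(limit)                     # print the top entries
            return result
        return wrapped
    return decorated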
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv | Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set. |
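A small usage sketch, assuming Werkzeug's ETags constructor accepts strong_etags/weak_etags keyword lists:
from werkzeug.datastructures import ETags

etags = ETags(strong_etags=['abc', 'def'], weak_etags=['xyz'])
print(etags.as_set())                    # {'abc', 'def'}
print(etags.as_set(include_weak=True))   # {'abc', 'def', 'xyz'}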
def get_ISSNs(self):
"""
Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings.
"""
invalid_issns = set(self.get_invalid_ISSNs())
return [
self._clean_isbn(issn)
for issn in self["022a"]
if self._clean_isbn(issn) not in invalid_issns
] | Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings. |
def compute_samples(channels, nsamples=None):
'''
create a generator which computes the samples.
essentially it creates a sequence of the sum of each function in the channel
at each sample in the file for each channel.
'''
    # Python 2 style: islice, izip and imap are expected to come from itertools.
    return islice(izip(*(imap(sum, izip(*channel)) for channel in channels)), nsamples) | create a generator which computes the samples.
essentially it creates a sequence of the sum of each function in the channel
at each sample in the file for each channel. |
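izip and imap are Python 2 names. A minimal Python 3 rendering of the same generator, with a tiny worked example (a sketch, not the original project's code):
from itertools import islice

def compute_samples_py3(channels, nsamples=None):
    # For each channel, sum the per-function values at every sample index,
    # then zip the channels together so each item is one frame of samples.
    return islice(zip(*(map(sum, zip(*channel)) for channel in channels)), nsamples)

# Two channels, each built from two "function" streams:
frames = compute_samples_py3([[iter([1, 2]), iter([0, 1])],
                              [iter([3, 3]), iter([1, 0])]])
print(list(frames))  # [(1, 4), (3, 3)]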
def grid_str(self, path=None, start=None, end=None,
border=True, start_chr='s', end_chr='e',
path_chr='x', empty_chr=' ', block_chr='#',
show_weight=False):
"""
create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return:
"""
data = ''
if border:
data = '+{}+'.format('-'*len(self.nodes[0]))
for y in range(len(self.nodes)):
line = ''
for x in range(len(self.nodes[y])):
node = self.nodes[y][x]
if node == start:
line += start_chr
elif node == end:
line += end_chr
elif path and ((node.x, node.y) in path or node in path):
line += path_chr
elif node.walkable:
# empty field
weight = str(node.weight) if node.weight < 10 else '+'
line += weight if show_weight else empty_chr
else:
line += block_chr # blocked field
if border:
line = '|'+line+'|'
if data:
data += '\n'
data += line
if border:
data += '\n+{}+'.format('-'*len(self.nodes[0]))
return data | create a printable string from the grid using ASCII characters
:param path: list of nodes that show the path
:param start: start node
:param end: end node
:param border: create a border around the grid
:param start_chr: character for the start (default "s")
:param end_chr: character for the destination (default "e")
:param path_chr: character to show the path (default "x")
:param empty_chr: character for empty fields (default " ")
:param block_chr: character for blocking elements (default "#")
:param show_weight: instead of empty_chr show the cost of each empty
field (shows a + if the value of weight is > 10)
:return: |
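A usage sketch with the python-pathfinding package this method appears to belong to (import paths and the matrix convention are assumptions):
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder

matrix = [[1, 1, 1],
          [1, 0, 1],   # 0 = blocked cell
          [1, 1, 1]]
grid = Grid(matrix=matrix)
start = grid.node(0, 0)
end = grid.node(2, 2)
path, _ = AStarFinder().find_path(start, end, grid)
print(grid.grid_str(path=path, start=start, end=end))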
def field2length(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for a set of
:class:`Length <marshmallow.validators.Length>` validators.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
validators = [
validator
for validator in field.validators
if (
hasattr(validator, "min")
and hasattr(validator, "max")
and hasattr(validator, "equal")
)
]
is_array = isinstance(
field, (marshmallow.fields.Nested, marshmallow.fields.List)
)
min_attr = "minItems" if is_array else "minLength"
max_attr = "maxItems" if is_array else "maxLength"
    for validator in validators:
        if validator.min is not None:
            # Use dict membership (not hasattr) so limits from several validators merge correctly.
            if min_attr in attributes:
                attributes[min_attr] = max(attributes[min_attr], validator.min)
            else:
                attributes[min_attr] = validator.min
        if validator.max is not None:
            if max_attr in attributes:
                attributes[max_attr] = min(attributes[max_attr], validator.max)
            else:
                attributes[max_attr] = validator.max
for validator in validators:
if validator.equal is not None:
attributes[min_attr] = validator.equal
attributes[max_attr] = validator.equal
return attributes | Return the dictionary of OpenAPI field attributes for a set of
:class:`Length <marshmallow.validators.Length>` validators.
:param Field field: A marshmallow field.
:rtype: dict |
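For illustration, fields whose Length validators this converter would translate (the conversion results shown are inferred from the code above; wiring the converter up through apispec is assumed):
from marshmallow import fields, validate

name = fields.Str(validate=validate.Length(min=3, max=40))
# field2length(name) on the converter -> {'minLength': 3, 'maxLength': 40}

tags = fields.List(fields.Str(), validate=validate.Length(max=10))
# a List field maps to item counts instead -> {'maxItems': 10}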
def libvlc_video_set_logo_int(p_mi, option, value):
'''Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param p_mi: libvlc media player instance.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value.
'''
f = _Cfunctions.get('libvlc_video_set_logo_int', None) or \
_Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
return f(p_mi, option, value) | Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param p_mi: libvlc media player instance.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value. |
def select_name(source, name):
'''
Yields all the elements with the given name
source - if an element, starts with all child elements in order; can also be any other iterator
name - will yield only elements with this name
'''
return filter(lambda x: x.xml_name == name, select_elements(source)) | Yields all the elements with the given name
source - if an element, starts with all child elements in order; can also be any other iterator
name - will yield only elements with this name |
def with_preference_param(self):
"""Add the preference param to the ES request and return a new Search.
The preference param avoids the bouncing effect with multiple
replicas, documented on ES documentation.
See: https://www.elastic.co/guide/en/elasticsearch/guide/current
/_search_options.html#_preference for more information.
"""
user_hash = self._get_user_hash()
if user_hash:
return self.params(preference=user_hash)
return self | Add the preference param to the ES request and return a new Search.
The preference param avoids the bouncing effect with multiple
replicas, documented on ES documentation.
See: https://www.elastic.co/guide/en/elasticsearch/guide/current
/_search_options.html#_preference for more information. |
def glow_hparams():
"""Glow Hparams."""
hparams = common_hparams.basic_params1()
hparams.clip_grad_norm = None
hparams.weight_decay = 0.0
hparams.learning_rate_constant = 3e-4
hparams.batch_size = 32
# can be prev_level, prev_step or normal.
# see: glow_ops.merge_level_and_latent_dist
hparams.add_hparam("level_scale", "prev_level")
hparams.add_hparam("n_levels", 3)
hparams.add_hparam("n_bits_x", 8)
hparams.add_hparam("depth", 32)
# Activation - Relu or Gatu
hparams.add_hparam("activation", "relu")
# Coupling layer, additive or affine.
hparams.add_hparam("coupling", "affine")
hparams.add_hparam("coupling_width", 512)
hparams.add_hparam("coupling_dropout", 0.0)
hparams.add_hparam("top_prior", "single_conv")
# init_batch_size denotes the number of examples used for data-dependent
# initialization. A higher init_batch_size is required for training
# stability especially when hparams.batch_size is low.
hparams.add_hparam("init_batch_size", 256)
hparams.add_hparam("temperature", 1.0)
return hparams | Glow Hparams. |
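A small usage sketch, overriding a couple of the defaults (assumes tensor2tensor and its HParams machinery are importable):
hparams = glow_hparams()
hparams.depth = 16             # shallower flow for a quick experiment
hparams.coupling = "additive"  # cheaper than the default affine coupling
print(hparams.n_levels, hparams.coupling_width)  # 3 512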
def validate_name(self, name, agent):
"""
Finds the modification of name which is not yet in the list
:param name: the (new) name for the agent
:param agent: the agent instance to allow the same name as the previous one if necessary
:return: the best modification of name not yet in a listwidget
"""
if name in self.bot_names_to_agent_dict and self.bot_names_to_agent_dict[name] is not agent:
i = 0
while True:
if name + " (" + str(i) + ")" in self.bot_names_to_agent_dict and \
self.bot_names_to_agent_dict[name + " (" + str(i) + ")"] is not agent:
i += 1
else:
value = name + " (" + str(i) + ")"
return value
else:
return name | Finds the modification of name which is not yet in the list
:param name: the (new) name for the agent
:param agent: the agent instance to allow the same name as the previous one if necessary
:return: the best modification of name not yet in a listwidget |
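The suffixing behaviour can be exercised in isolation by calling the function with a stand-in for self (names here are illustrative only):
from types import SimpleNamespace

gui = SimpleNamespace(bot_names_to_agent_dict={})
agent_a, agent_b = object(), object()
gui.bot_names_to_agent_dict['Bot'] = agent_a
print(validate_name(gui, 'Bot', agent_a))  # 'Bot'      (same agent keeps its name)
print(validate_name(gui, 'Bot', agent_b))  # 'Bot (0)'  (first unused suffix)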