def as_string(self, chars, current_linkable=False, class_current="active_link"):
"""
Return the menu as a string.
"""
return self.__do_menu("as_string", current_linkable, class_current, chars)
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def get_dash(self):
"""Return the current dash pattern.
:returns:
A ``(dashes, offset)`` tuple of a list and a float.
:obj:`dashes` is a list of floats,
empty if no dashing is in effect.
"""
dashes = ffi.new('double[]', cairo.cairo_get_dash_count(self._pointer))
offset = ffi.new('double *')
cairo.cairo_get_dash(self._pointer, dashes, offset)
self._check_status()
return list(dashes), offset[0]
def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rule starting with the prefix from ``nomenclature.RULE_REFERENCE``
is an available reference.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of all available reference names (the real names, without the prefix).
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names
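The prefix-stripping above can be seen in isolation; the RULE_REFERENCE value below is a hypothetical stand-in for illustration only:
RULE_REFERENCE = "reference"  # hypothetical prefix value, for illustration
datas = {"reference-header": {"content": "..."}, "metas": {}}
names = [k[len(RULE_REFERENCE) + 1:] for k in datas if k.startswith(RULE_REFERENCE)]
print(names)  # ['header'] -- prefix and the separator character are stripped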
def hkdf(self, chaining_key, input_key_material, dhlen=64):
"""Hash-based key derivation function
Takes a ``chaining_key'' byte sequence of len HASHLEN, and an
``input_key_material'' byte sequence with length either zero
bytes, 32 bytes or dhlen bytes.
Returns two byte sequences of length HASHLEN"""
if len(chaining_key) != self.HASHLEN:
raise HashError("Incorrect chaining key length")
if len(input_key_material) not in (0, 32, dhlen):
raise HashError("Incorrect input key material length")
temp_key = self.hmac_hash(chaining_key, input_key_material)
output1 = self.hmac_hash(temp_key, b'\x01')
output2 = self.hmac_hash(temp_key, output1 + b'\x02')
return output1, output2
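A minimal standalone sketch of the same two-output HKDF construction, assuming hmac_hash is HMAC-SHA512 (so HASHLEN is 64); this is illustrative only and not necessarily the hash the class is configured with:
import hashlib
import hmac
HASHLEN = 64  # assumed: SHA-512 digest size
def hmac_sha512(key, data):
    return hmac.new(key, data, hashlib.sha512).digest()
def hkdf_sketch(chaining_key, input_key_material, dhlen=64):
    # One HMAC derives a temporary key, two chained HMACs produce the outputs.
    assert len(chaining_key) == HASHLEN
    assert len(input_key_material) in (0, 32, dhlen)
    temp_key = hmac_sha512(chaining_key, input_key_material)
    output1 = hmac_sha512(temp_key, b'\x01')
    output2 = hmac_sha512(temp_key, output1 + b'\x02')
    return output1, output2
k1, k2 = hkdf_sketch(b'\x00' * HASHLEN, b'\x11' * 32)
print(len(k1), len(k2))  # 64 64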
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover
"""Use a Ray task to read columns from HDF5 into a Pandas DataFrame.
Note: Ray functions are not detected by codecov (thus pragma: no cover)
Args:
path_or_buf: The path of the HDF5 file.
columns: The list of column names to read.
num_splits: The number of partitions to split the column into.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
element. If there is no `index_col` set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index.
"""
df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)
# Append the length of the index here to build it externally
return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
def create_weekmatrices(user, split_interval=60):
"""
Computes raw indicators (e.g. number of outgoing calls) for intervals of ~1
hour across each week of user data. These "week-matrices" are returned in a
nested list with each sublist containing [user.name, channel, weekday,
section, value].
Parameters
----------
user : object
The user to create week-matrices for.
split_interval : int
The interval in minutes for which each indicator is computed. Defaults to 60.
Needs to be able to split a day (24*60 minutes) evenly.
"""
if not float(24 * 60 / split_interval).is_integer():
raise ValueError(
"The minute interval set for the week-matrix structure does not evenly divide the day!")
contacts_in = partial(bc.individual.number_of_contacts,
direction='in', interaction='callandtext', summary=None)
contacts_out = partial(bc.individual.number_of_contacts,
direction='out', interaction='callandtext', summary=None)
calls_in = partial(bc.individual.number_of_interactions,
direction='in', interaction='call', summary=None)
calls_out = partial(bc.individual.number_of_interactions,
direction='out', interaction='call', summary=None)
texts_in = partial(bc.individual.number_of_interactions,
direction='in', interaction='text', summary=None)
texts_out = partial(bc.individual.number_of_interactions,
direction='out', interaction='text', summary=None)
time_spent_in = partial(bc.individual.call_duration,
direction='in', interaction='call', summary=None)
time_spent_out = partial(bc.individual.call_duration,
direction='out', interaction='call', summary=None)
core_func = [
(contacts_in, "scalar"),
(contacts_out, "scalar"),
(calls_in, "scalar"),
(calls_out, "scalar"),
(texts_in, "scalar"),
(texts_out, "scalar")
]
time_func = [
(time_spent_in, "summarystats"),
(time_spent_out, "summarystats")
]
wm = []
sections = [
(i + 1) * split_interval for i in range(7 * 24 * 60 // split_interval)]
temp_user = _extract_user_info(user)
for grouped_records in group_records(user.records, groupby='week'):
week_records = list(grouped_records)
time_spent_rec = _transform_to_time_spent(
week_records, split_interval, sections)
wm.extend(_calculate_channels(
week_records, sections, split_interval, core_func, temp_user))
wm.extend(_calculate_channels(
time_spent_rec, sections, split_interval, time_func, temp_user, len(core_func)))
return wm
def check_install():
"""
Try to detect the two most common installation errors:
1. Installing on macOS using a Homebrew version of Python
2. Installing on Linux using Python 2 when GDB is linked with Python 3
"""
if platform.system() == 'Darwin' and sys.executable != '/usr/bin/python':
print("*" * 79)
print(textwrap.fill(
"WARNING: You are not using the version of Python included with "
"macOS. If you intend to use Voltron with the LLDB included "
"with Xcode, or GDB installed with Homebrew, it will not work "
"unless it is installed using the system's default Python. If "
"you intend to use Voltron with a debugger installed by some "
"other method, it may be safe to ignore this warning. See the "
"following documentation for more detailed installation "
"instructions: "
"https://github.com/snare/voltron/wiki/Installation", 79))
print("*" * 79)
elif platform.system() == 'Linux':
try:
output = check_output([
"gdb", "-batch", "-q", "--nx", "-ex",
"pi print(sys.version_info.major)"
]).decode("utf-8")
gdb_python = int(output)
if gdb_python != sys.version_info.major:
print("*" * 79)
print(textwrap.fill(
"WARNING: You are installing Voltron using Python {0}.x "
"and GDB is linked with Python {1}.x. GDB will not be "
"able to load Voltron. Please install using Python {1} "
"if you intend to use Voltron with the copy of GDB that "
"is installed. See the following documentation for more "
"detailed installation instructions: "
"https://github.com/snare/voltron/wiki/Installation"
.format(sys.version_info.major, gdb_python), 79))
print("*" * 79)
except:
pass
def mv(source, target):
''' Move synchronized directory. '''
if os.path.isfile(target) and len(source) == 1:
if click.confirm("Are you sure you want to overwrite %s?" % target):
err_msg = cli_syncthing_adapter.mv_edge_case(source, target)
# Edge case: to match Bash 'mv' behavior and overwrite file
if err_msg:
click.echo(err_msg)
return
if len(source) > 1 and not os.path.isdir(target):
click.echo(click.get_current_context().get_help())
return
else:
err_msg, err = cli_syncthing_adapter.mv(source, target)
if err_msg:
click.echo(err_msg, err)
def waitForCreation(self, timeout=10, notification='AXCreated'):
"""Convenience method to wait for creation of some UI element.
Returns: The element created
"""
callback = AXCallbacks.returnElemCallback
retelem = None
args = (retelem,)
return self.waitFor(timeout, notification, callback=callback,
args=args)
def properties(self):
"""
Property for accessing :class:`PropertyManager` instance, which is used to manage properties of the jobs.
:rtype: yagocd.resources.property.PropertyManager
"""
if self._property_manager is None:
self._property_manager = PropertyManager(session=self._session)
return self._property_manager
def _make_jsmin(python_only=False):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin # pylint: disable = F0401
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
space_comment_nobang = r'(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/)'
bang_comment = r'(?:/\*![^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
space = r'(?:%s|%s)' % (space_chars, space_comment)
space_nobang = r'(?:%s|%s)' % (space_chars, space_comment_nobang)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(
r'([\000-\040\047])', # \047 for better portability
lambda m: '\\%03o' % ord(m.group(1)), (
sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
preregex1 = r'[(,=:\[!&|?{};\r\n]'
preregex2 = r'%(not_id_literal)sreturn' % locals()
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
dull = r'[^\047"/\000-\040]'
space_sub_simple = _re.compile((
# noqa pylint: disable = C0330
r'(%(dull)s+)'
r'|(%(strings)s%(dull)s*)'
r'|(?<=%(preregex1)s)'
r'%(space)s*(?:%(newline)s%(space)s*)*'
r'(%(regex)s%(dull)s*)'
r'|(?<=%(preregex2)s)'
r'%(space)s*(?:%(newline)s%(space)s)*'
r'(%(regex)s%(dull)s*)'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|(?<=\+)(%(space)s)+(?=\+)'
r'|(?<=-)(%(space)s)+(?=-)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
#print space_sub_simple.__self__.pattern
def space_subber_simple(match):
""" Substitution callback """
# pylint: disable = R0911
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return '\n'
elif groups[5] or groups[6] or groups[7]:
return ' '
else:
return ''
space_sub_banged = _re.compile((
# noqa pylint: disable = C0330
r'(%(dull)s+)'
r'|(%(strings)s%(dull)s*)'
r'|(%(bang_comment)s%(dull)s*)'
r'|(?<=%(preregex1)s)'
r'%(space)s*(?:%(newline)s%(space)s*)*'
r'(%(regex)s%(dull)s*)'
r'|(?<=%(preregex2)s)'
r'%(space)s*(?:%(newline)s%(space)s)*'
r'(%(regex)s%(dull)s*)'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|(?<=\+)(%(space)s)+(?=\+)'
r'|(?<=-)(%(space)s)+(?=-)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % dict(locals(), space=space_nobang)).sub
#print space_sub_banged.__self__.pattern
def space_subber_banged(match):
""" Substitution callback """
# pylint: disable = R0911
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return groups[4]
elif groups[5]:
return '\n'
elif groups[6] or groups[7] or groups[8]:
return ' '
else:
return ''
def jsmin(script, keep_bang_comments=False): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
`keep_bang_comments` : ``bool``
Keep comments starting with an exclamation mark? (``/*!...*/``)
:Return: Minified script
:Rtype: ``str``
"""
if keep_bang_comments:
return space_sub_banged(
space_subber_banged, '\n%s\n' % script
).strip()
else:
return space_sub_simple(
space_subber_simple, '\n%s\n' % script
).strip()
return jsmin
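Hypothetical usage of the returned minifier (assuming the module calls the factory once at import time to expose a jsmin function):
jsmin = _make_jsmin(python_only=True)  # skip the optional C extension
print(jsmin('var a = 1;  // comment\nvar b  =  2;'))
# expected output is roughly: var a=1;var b=2;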
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
# print('\r %s |%s| %s %s' % (prefix, bar, percent, suffix), end='\r')
print(percent)
# Print New Line on Complete
if iteration == total:
print()
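A hedged usage sketch; with the bar-drawing lines commented out above, each call just prints the completion percentage:
import time
total = 20
for i in range(1, total + 1):
    time.sleep(0.01)  # simulate work
    print_progress_bar(i, total, prefix='Progress:', suffix='Complete', length=40)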
def QWidget_factory(ui_file=None, *args, **kwargs):
"""
Defines a class factory creating `QWidget <http://doc.qt.nokia.com/qwidget.html>`_ classes
using given ui file.
:param ui_file: Ui file.
:type ui_file: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: QWidget class.
:rtype: QWidget
"""
file = ui_file or DEFAULT_UI_FILE
if not foundations.common.path_exists(file):
raise foundations.exceptions.FileExistsError("{0} | '{1}' ui file doesn't exist!".format(__name__, file))
Form, Base = uic.loadUiType(file)
class QWidget(Form, Base):
"""
Derives from :def:`QWidget_factory` class factory definition.
"""
def __init__(self, *args, **kwargs):
"""
Initializes the class.
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(QWidget, self).__init__(*args, **kwargs)
self.__ui_file = file
self.__geometry = None
self.setupUi(self)
@property
def ui_file(self):
"""
Property for **self.__ui_file** attribute.
:return: self.__ui_file.
:rtype: unicode
"""
return self.__ui_file
@ui_file.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_file(self, value):
"""
Setter for **self.__ui_file** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is read only!".format(
self.__class__.__name__, "ui_file"))
@ui_file.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_file(self):
"""
Deleter for **self.__ui_file** attribute.
"""
raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
self.__class__.__name__, "ui_file"))
def show(self, setGeometry=True):
"""
Reimplements the :meth:`QWidget.show` method.
:param setGeometry: Set geometry.
:type setGeometry: bool
"""
if not setGeometry:
super(QWidget, self).show()
return
wasHidden = not self.isVisible()
if self.__geometry is None and wasHidden:
center_widget_on_screen(self)
super(QWidget, self).show()
if self.__geometry is not None and wasHidden:
self.restoreGeometry(self.__geometry)
def closeEvent(self, event):
"""
Reimplements the :meth:`QWidget.closeEvent` method.
:param event: QEvent.
:type event: QEvent
"""
self.__geometry = self.saveGeometry()
event.accept()
return QWidget
def remove(self, fieldspec):
"""
Removes fields or subfields according to `fieldspec`.
If a non-control field subfield removal leaves no other subfields,
delete the field entirely.
"""
pattern = r'(?P<field>[^.]+)(.(?P<subfield>[^.]+))?'
match = re.match(pattern, fieldspec)
if not match:
return None
grp = match.groupdict()
for field in self.get_fields(grp['field']):
if grp['subfield']:
updated = []
for code, value in pairwise(field.subfields):
if not code == grp['subfield']:
updated += [code, value]
# if we removed the last subfield entry,
# remove the whole field, too
if not updated:
self.remove_field(field)
else:
field.subfields = updated
else:
# it is a control field
self.remove_field(field)
def lockToColumn(self, index):
"""
Sets the column that the tree view will lock to. If None is supplied,
then locking will be removed.
:param index | <int> || None
"""
self._lockColumn = index
if index is None:
self.__destroyLockedView()
return
else:
if not self._lockedView:
view = QtGui.QTreeView(self.parent())
view.setModel(self.model())
view.setSelectionModel(self.selectionModel())
view.setItemDelegate(self.itemDelegate())
view.setFrameShape(view.NoFrame)
view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setRootIsDecorated(self.rootIsDecorated())
view.setUniformRowHeights(True)
view.setFocusProxy(self)
view.header().setFocusProxy(self.header())
view.setStyleSheet(self.styleSheet())
view.setAutoScroll(False)
view.setSortingEnabled(self.isSortingEnabled())
view.setPalette(self.palette())
view.move(self.x(), self.y())
self.setAutoScroll(False)
self.setUniformRowHeights(True)
view.collapsed.connect(self.collapse)
view.expanded.connect(self.expand)
view.expanded.connect(self.__updateLockedView)
view.collapsed.connect(self.__updateLockedView)
view_head = view.header()
for i in range(self.columnCount()):
view_head.setResizeMode(i, self.header().resizeMode(i))
view.header().sectionResized.connect(self.__updateStandardSection)
self.header().sectionResized.connect(self.__updateLockedSection)
vbar = view.verticalScrollBar()
self.verticalScrollBar().valueChanged.connect(vbar.setValue)
self._lockedView = view
self.__updateLockedView()
def server(request):
"""
Respond to requests for the server's primary web page.
"""
return direct_to_template(
request,
'server/index.html',
{'user_url': getViewURL(request, idPage),
'server_xrds_url': getViewURL(request, idpXrds),
})
def discover(language):
''' Discovers all registered scrapers to be used for the generic scraping interface. '''
debug('Discovering scrapers for \'%s\'...' % (language,))
global scrapers, discovered
for language in scrapers.iterkeys():
discovered[language] = {}
for scraper in scrapers[language]:
blacklist = ['download', 'isdownloaded', 'getelements']
methods = [method for method in dir(scraper) if method not in blacklist and not method.startswith('_') and callable(getattr(scraper, method))]
for method in methods:
if discovered[language].has_key(method):
discovered[language][method].append(scraper)
else:
discovered[language][method] = [scraper]
debug('%d scrapers with %d methods (overall) registered for \'%s\'.' % (len(scrapers[language]), len(discovered[language].keys()), language))
def check_bidi(data):
"""Checks if sting is valid for bidirectional printing."""
has_l = False
has_ral = False
for char in data:
if stringprep.in_table_d1(char):
has_ral = True
elif stringprep.in_table_d2(char):
has_l = True
if has_l and has_ral:
raise StringprepError("Both RandALCat and LCat characters present")
if has_ral and (not stringprep.in_table_d1(data[0])
or not stringprep.in_table_d1(data[-1])):
raise StringprepError("The first and the last character must"
" be RandALCat")
return data
def handle_battery_level(msg):
"""Process an internal battery level message."""
if not msg.gateway.is_sensor(msg.node_id):
return None
msg.gateway.sensors[msg.node_id].battery_level = msg.payload
msg.gateway.alert(msg)
return None
def getScans(self, modifications=True, fdr=True):
"""
Yield each scan in the file, followed by None.
"""
if not self.scans:
for i in self:
yield i
else:
for i in self.scans.values():
yield i
yield None
def _vote_disagreement(self, votes):
"""
Return the disagreement measurement of the given number of votes.
It uses vote entropy to measure the disagreement.
Parameters
----------
votes : list of int, shape==(n_samples, n_students)
The predictions that each student gives to each sample.
Returns
-------
disagreement : list of float, shape=(n_samples)
The vote entropy of the given votes.
"""
ret = []
for candidate in votes:
ret.append(0.0)
lab_count = {}
for lab in candidate:
lab_count[lab] = lab_count.setdefault(lab, 0) + 1
# Using vote entropy to measure disagreement
for lab in lab_count.keys():
ret[-1] -= lab_count[lab] / self.n_students * \
math.log(float(lab_count[lab]) / self.n_students)
return ret
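The vote-entropy measure on its own, as a small standalone sketch (n_students is passed explicitly instead of being read from the committee object):
import math
from collections import Counter
def vote_entropy(candidate_votes, n_students):
    # Entropy of the empirical label distribution for one sample's votes.
    counts = Counter(candidate_votes)
    return -sum((c / n_students) * math.log(c / n_students) for c in counts.values())
print(vote_entropy([1, 1, 1], 3))  # 0.0, full agreement
print(vote_entropy([0, 1, 1], 3))  # ~0.64, partial disagreement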
def describe(self, **kwargs):
"""
:returns: A hash containing attributes of the project or container.
:rtype: dict
Returns a hash with key-value pairs as specified by the API
specification for the `/project-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe>`_
method. This will usually include keys such as "id", "name",
"class", "billTo", "created", "modified", and "dataUsage".
"""
# TODO: link to /container-xxxx/describe
api_method = dxpy.api.container_describe
if isinstance(self, DXProject):
api_method = dxpy.api.project_describe
self._desc = api_method(self._dxid, **kwargs)
return self._desc
async def stop(self):
"""
Stop discarding media.
"""
for task in self.__tracks.values():
if task is not None:
task.cancel()
self.__tracks = {}
def ko_model(model, field_names=None, data=None):
"""
Given a model, returns the Knockout Model and the Knockout ViewModel.
Takes optional field names and data.
"""
try:
if isinstance(model, str):
modelName = model
else:
modelName = model.__class__.__name__
if field_names:
fields = field_names
else:
fields = get_fields(model)
if hasattr(model, "comparator"):
comparator = str(model.comparator())
else:
comparator = 'id'
modelViewString = render_to_string(
"knockout_modeler/model.js",
{'modelName': modelName, 'fields': fields, 'data': data, 'comparator': comparator}
)
return modelViewString
except Exception as e:
logger.exception(e)
return ''
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
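The bit test used above, isolated with a hypothetical register value (with this mask arrangement, bit 0 maps to the most significant bit of the 32-bit field):
feature_bits = 0xBFEBFBFF  # hypothetical value, for illustration only
def is_set(bit):
    mask = 0x80000000 >> bit
    return (mask & feature_bits) > 0
present = [name for name, bit in (('fpu', 0), ('sse', 25), ('sse2', 26)) if is_set(bit)]
print(present)  # all three bits are set for this sample value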
def index():
"""
This is not served anywhere in the web application.
It is used explicitly in the context of generating static files since
flask-frozen requires url_for's to crawl content.
url_for's are not used with result.show_result directly and are instead
dynamically generated through javascript for performance purposes.
"""
if current_app.config['ARA_PLAYBOOK_OVERRIDE'] is not None:
override = current_app.config['ARA_PLAYBOOK_OVERRIDE']
results = (models.TaskResult.query
.join(models.Task)
.filter(models.Task.playbook_id.in_(override)))
else:
results = models.TaskResult.query.all()
return render_template('task_result_index.html', results=results)
def list_resource_commands(self):
"""Returns a list of multi-commands for each resource type.
"""
resource_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
'resources'
))
answer = set([])
for _, name, _ in pkgutil.iter_modules([resource_path]):
res = tower_cli.get_resource(name)
if not getattr(res, 'internal', False):
answer.add(name)
return sorted(answer)
def patch_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data
def command_max_delay(self, event=None):
""" CPU burst max running time - self.runtime_cfg.max_delay """
try:
max_delay = self.max_delay_var.get()
except ValueError:
max_delay = self.runtime_cfg.max_delay
if max_delay < 0:
max_delay = self.runtime_cfg.max_delay
if max_delay > 0.1:
max_delay = self.runtime_cfg.max_delay
self.runtime_cfg.max_delay = max_delay
self.max_delay_var.set(self.runtime_cfg.max_delay)
def _read_to_buffer(self) -> Optional[int]:
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
while True:
try:
if self._user_read_buffer:
buf = memoryview(self._read_buffer)[
self._read_buffer_size :
] # type: Union[memoryview, bytearray]
else:
buf = bytearray(self.read_chunk_size)
bytes_read = self.read_from_fd(buf)
except (socket.error, IOError, OSError) as e:
if errno_from_exception(e) == errno.EINTR:
continue
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=e)
return None
self.close(exc_info=e)
raise
break
if bytes_read is None:
return 0
elif bytes_read == 0:
self.close()
return 0
if not self._user_read_buffer:
self._read_buffer += memoryview(buf)[:bytes_read]
self._read_buffer_size += bytes_read
finally:
# Break the reference to buf so we don't waste a chunk's worth of
# memory in case an exception hangs on to our stack frame.
del buf
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return bytes_read
def upload(ctx, product, git_ref, dirname, aws_id, aws_secret, ci_env,
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron,
skip_upload):
"""Upload a new site build to LSST the Docs.
"""
logger = logging.getLogger(__name__)
if skip_upload:
click.echo('Skipping ltd upload.')
sys.exit(0)
logger.debug('CI environment: %s', ci_env)
logger.debug('Travis events settings. '
'On Push: %r, PR: %r, API: %r, Cron: %r',
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron)
# Abort upload on Travis CI under certain events
if ci_env == 'travis' and \
_should_skip_travis_event(
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron):
sys.exit(0)
# Authenticate to LTD Keeper host
ensure_login(ctx)
# Detect git refs
git_refs = _get_git_refs(ci_env, git_ref)
build_resource = register_build(
ctx.obj['keeper_hostname'],
ctx.obj['token'],
product,
git_refs
)
logger.debug('Created build resource %r', build_resource)
# Do the upload.
# This cache_control is appropriate for builds since they're immutable.
# The LTD Keeper server changes the cache settings when copying the build
# over to be a mutable edition.
upload_dir(
build_resource['bucket_name'],
build_resource['bucket_root_dir'],
dirname,
aws_access_key_id=aws_id,
aws_secret_access_key=aws_secret,
surrogate_key=build_resource['surrogate_key'],
cache_control='max-age=31536000',
surrogate_control=None,
upload_dir_redirect_objects=True)
logger.debug('Upload complete for %r', build_resource['self_url'])
# Confirm upload
confirm_build(
build_resource['self_url'],
ctx.obj['token']
)
logger.debug('Build %r complete', build_resource['self_url'])
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
Courtesy: Thomas ( http://stackoverflow.com/questions/12090503
/listing-available-com-ports-with-python )
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException) as e:
hfoslog('Could not open serial port:', port, e, type(e),
exc=True, lvl=warn)
return result
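Hedged usage sketch (requires the pyserial package the function imports as serial):
available = serial_ports()
if available:
    print('Serial ports found:', ', '.join(available))
else:
    print('No serial ports detected')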
def build_asset_array(assets_by_site, tagnames=(), time_event=None):
"""
:param assets_by_site: a list of lists of assets
:param tagnames: a list of tag names
:returns: an array `assetcol`
"""
for assets in assets_by_site:
if len(assets):
first_asset = assets[0]
break
else: # no break
raise ValueError('There are no assets!')
loss_types = []
occupancy_periods = []
for name in sorted(first_asset.values):
if name.startswith('occupants_'):
period = name.split('_', 1)[1]
if period != 'None':
# see scenario_risk test_case_2d
occupancy_periods.append(period)
loss_types.append(name)
# discard occupants for different time periods
else:
loss_types.append('value-' + name)
# loss_types can be ['value-business_interruption', 'value-contents',
# 'value-nonstructural', 'occupants_None', 'occupants_day',
# 'occupants_night', 'occupants_transit']
deductible_d = first_asset.deductibles or {}
limit_d = first_asset.insurance_limits or {}
if deductible_d or limit_d:
logging.warning('Exposures with insuranceLimit/deductible fields are '
'deprecated and may be removed in the future')
retro = ['retrofitted'] if first_asset._retrofitted else []
float_fields = loss_types + retro
int_fields = [(str(name), U16) for name in tagnames]
tagi = {str(name): i for i, name in enumerate(tagnames)}
asset_dt = numpy.dtype(
[('ordinal', U32), ('lon', F32), ('lat', F32), ('site_id', U32),
('number', F32), ('area', F32)] + [
(str(name), float) for name in float_fields] + int_fields)
num_assets = sum(len(assets) for assets in assets_by_site)
assetcol = numpy.zeros(num_assets, asset_dt)
asset_ordinal = 0
fields = set(asset_dt.fields)
for sid, assets_ in enumerate(assets_by_site):
for asset in assets_:
asset.ordinal = asset_ordinal
record = assetcol[asset_ordinal]
asset_ordinal += 1
for field in fields:
if field == 'ordinal':
value = asset.ordinal
elif field == 'number':
value = asset.number
elif field == 'area':
value = asset.area
elif field == 'site_id':
value = sid
elif field == 'lon':
value = asset.location[0]
elif field == 'lat':
value = asset.location[1]
elif field.startswith('occupants_'):
value = asset.values[field]
elif field == 'retrofitted':
value = asset.retrofitted()
elif field in tagnames:
value = asset.tagidxs[tagi[field]]
else:
name, lt = field.split('-')
value = asset.value(lt, time_event)
record[field] = value
return assetcol, ' '.join(occupancy_periods)
def is_successful(self, retry=False):
"""
If the instance runs successfully.
:return: True if successful else False
:rtype: bool
"""
if not self.is_terminated(retry=retry):
return False
retry_num = options.retry_times
while retry_num > 0:
try:
statuses = self.get_task_statuses()
return all(task.status == Instance.Task.TaskStatus.SUCCESS
for task in statuses.values())
except (errors.InternalServerError, errors.RequestTimeTooSkewed):
retry_num -= 1
if not retry or retry_num <= 0:
raise
def add_report(self, specification_name, report):
"""
Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The
"""
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total - self._failures - self._errors
def parse(self):
"""Parse show subcommand."""
parser = self.subparser.add_parser(
"show",
help="Show workspace details",
description="Show workspace details.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--all', action='store_true', help="All workspaces")
group.add_argument('name', type=str, help="Workspace name", nargs='?')
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs
def get_chart(self, id, **kwargs):
""""Retrieve a (v2) chart by id.
"""
resp = self._get_object_by_name(self._CHART_ENDPOINT_SUFFIX, id,
**kwargs)
return resp
def p_multiplicative_expr(self, p):
"""multiplicative_expr : unary_expr
| multiplicative_expr MULT unary_expr
| multiplicative_expr DIV unary_expr
| multiplicative_expr MOD unary_expr
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ast.BinOp(op=p[2], left=p[1], right=p[3])
def get_text(self):
"""Get the text in its current state."""
return u''.join(u'{0}'.format(b) for b in self.text)
def just_find_proxy(pacfile, url, host=None):
"""
This function is a wrapper around init, parse_pac, find_proxy
and cleanup. This is the function to call if you want to find
proxy just for one url.
"""
if not os.path.isfile(pacfile):
raise IOError('Pac file does not exist: {}'.format(pacfile))
init()
parse_pac(pacfile)
proxy = find_proxy(url,host)
cleanup()
return proxy
def _runcog(options, files, uncog=False):
"""Common function for the cog and runcog tasks."""
options.order('cog', 'sphinx', add_rest=True)
c = Cog()
if uncog:
c.options.bNoGenerate = True
c.options.bReplace = True
c.options.bDeleteCode = options.get("delete_code", False)
includedir = options.get('includedir', None)
if includedir:
include = Includer(includedir, cog=c,
include_markers=options.get("include_markers"))
# load cog's namespace with our convenience functions.
c.options.defines['include'] = include
c.options.defines['sh'] = _cogsh(c)
c.options.sBeginSpec = options.get('beginspec', '[[[cog')
c.options.sEndSpec = options.get('endspec', ']]]')
c.options.sEndOutput = options.get('endoutput', '[[[end]]]')
basedir = options.get('basedir', None)
if basedir is None:
basedir = (path(options.get('docroot', "docs"))
/ options.get('sourcedir', ""))
basedir = path(basedir)
if not files:
pattern = options.get("pattern", "*.rst")
if pattern:
files = basedir.walkfiles(pattern)
else:
files = basedir.walkfiles()
for f in sorted(files):
dry("cog %s" % f, c.processOneFile, f)
def print_object_results(obj_result):
"""Print the results of validating an object.
Args:
obj_result: An ObjectValidationResults instance.
"""
print_results_header(obj_result.object_id, obj_result.is_valid)
if obj_result.warnings:
print_warning_results(obj_result, 1)
if obj_result.errors:
print_schema_results(obj_result, 1)
def parse_summary(self, contents):
"""Parses summary file into a dictionary of counts."""
lines = contents.strip().split('\n')
data = {}
for row in lines[1:]:
split = row.strip().split('\t')
sample = split[0]
data[sample] = {
'species_a': int(split[1]),
'species_b': int(split[2]),
'ambiguous': int(split[3])
}
return data
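For example, a small tab-separated summary (header row plus one line per sample) parses into per-sample counts; the sample names and numbers below are made up:
contents = (
    'sample\tspecies_a\tspecies_b\tambiguous\n'
    'S1\t120\t30\t5\n'
    'S2\t80\t95\t12\n'
)
# self.parse_summary(contents) would return:
# {'S1': {'species_a': 120, 'species_b': 30, 'ambiguous': 5},
#  'S2': {'species_a': 80, 'species_b': 95, 'ambiguous': 12}}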
def _onError(self, message):
"""Memorizies a parser error message"""
self.isOK = False
if message.strip() != "":
self.errors.append(message)
def _import_lua_dependencies(lua, lua_globals):
"""
Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib.
"""
if sys.platform not in ('darwin', 'windows'):
import ctypes
ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL)
try:
lua_globals.cjson = lua.eval('require "cjson"')
except RuntimeError:
raise RuntimeError("cjson not installed")
def transpose(self, rows):
"""
Transposes the grid to allow for cols
"""
res = OrderedDict()
for row, cols in rows.items():
for col, cell in cols.items():
if col not in res:
res[col] = OrderedDict()
res[col][row] = cell
return res
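A self-contained illustration of the transposition (written as a plain function rather than the class method):
from collections import OrderedDict
def transpose(rows):
    res = OrderedDict()
    for row, cols in rows.items():
        for col, cell in cols.items():
            res.setdefault(col, OrderedDict())[row] = cell
    return res
grid = OrderedDict([('r1', OrderedDict([('a', 1), ('b', 2)])),
                    ('r2', OrderedDict([('a', 3), ('b', 4)]))])
print(transpose(grid))
# columns become the outer keys: a -> {r1: 1, r2: 3}, b -> {r1: 2, r2: 4}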
def _example_from_allof(self, prop_spec):
"""Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict
"""
example_dict = {}
for definition in prop_spec['allOf']:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict
def patchURL(self, url, headers, body):
"""
Request a URL using the HTTP method PATCH.
"""
return self._load_resource("PATCH", url, headers, body)
def _get_position(self):
"""
Get the Xueqiu (雪球) portfolio holdings.
:return:
"""
portfolio_code = self.account_config["portfolio_code"]
portfolio_info = self._get_portfolio_info(portfolio_code)
position = portfolio_info["view_rebalancing"] # position structure
stocks = position["holdings"] # held stocks
return stocks
def serialize_filesec(self):
"""
Return the file Element for this file, appropriate for use in a fileSec.
If this is not an Item or has no use, return None.
:return: fileSec element for this FSEntry
"""
if (
self.type.lower() not in ("item", "archival information package")
or self.use is None
):
return None
el = etree.Element(utils.lxmlns("mets") + "file", ID=self.file_id())
if self.group_id():
el.attrib["GROUPID"] = self.group_id()
if self.admids:
el.set("ADMID", " ".join(self.admids))
if self.checksum and self.checksumtype:
el.attrib["CHECKSUM"] = self.checksum
el.attrib["CHECKSUMTYPE"] = self.checksumtype
if self.path:
flocat = etree.SubElement(el, utils.lxmlns("mets") + "FLocat")
# Setting manually so order is correct
try:
flocat.set(utils.lxmlns("xlink") + "href", utils.urlencode(self.path))
except ValueError:
raise exceptions.SerializeError(
'Value "{}" (for attribute xlink:href) is not a valid'
" URL.".format(self.path)
)
flocat.set("LOCTYPE", "OTHER")
flocat.set("OTHERLOCTYPE", "SYSTEM")
for transform_file in self.transform_files:
transform_file_el = etree.SubElement(
el, utils.lxmlns("mets") + "transformFile"
)
for key, val in transform_file.items():
attribute = "transform{}".format(key).upper()
transform_file_el.attrib[attribute] = str(val)
return el
def gradient_factory(name):
"""Create gradient `Functional` for some ufuncs."""
if name == 'sin':
def gradient(self):
"""Return the gradient operator."""
return cos(self.domain)
elif name == 'cos':
def gradient(self):
"""Return the gradient operator."""
return -sin(self.domain)
elif name == 'tan':
def gradient(self):
"""Return the gradient operator."""
return 1 + square(self.domain) * self
elif name == 'sqrt':
def gradient(self):
"""Return the gradient operator."""
return FunctionalQuotient(ConstantFunctional(self.domain, 0.5),
self)
elif name == 'square':
def gradient(self):
"""Return the gradient operator."""
return ScalingFunctional(self.domain, 2.0)
elif name == 'log':
def gradient(self):
"""Return the gradient operator."""
return reciprocal(self.domain)
elif name == 'exp':
def gradient(self):
"""Return the gradient operator."""
return self
elif name == 'reciprocal':
def gradient(self):
"""Return the gradient operator."""
return FunctionalQuotient(ConstantFunctional(self.domain, -1.0),
square(self.domain))
elif name == 'sinh':
def gradient(self):
"""Return the gradient operator."""
return cosh(self.domain)
elif name == 'cosh':
def gradient(self):
"""Return the gradient operator."""
return sinh(self.domain)
else:
# Fallback to default
gradient = Functional.gradient
return gradient
def register_hooks(app):
"""Register hooks."""
@app.before_request
def before_request():
g.user = get_current_user()
if g.user and g.user.is_admin:
g._before_request_time = time.time()
@app.after_request
def after_request(response):
if hasattr(g, '_before_request_time'):
delta = time.time() - g._before_request_time
response.headers['X-Render-Time'] = delta * 1000
return response
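A pared-down sketch of the same timing hooks on a bare Flask app (the get_current_user/admin check is omitted; the header value is in milliseconds):
import time
from flask import Flask, g
app = Flask(__name__)
@app.before_request
def start_timer():
    g._before_request_time = time.time()
@app.after_request
def add_render_time(response):
    if hasattr(g, '_before_request_time'):
        elapsed_ms = (time.time() - g._before_request_time) * 1000
        response.headers['X-Render-Time'] = str(elapsed_ms)
    return response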
def issue_add_comment(self, issue_key, comment, visibility=None):
"""
Add comment into Jira issue
:param issue_key:
:param comment:
:param visibility: OPTIONAL
:return:
"""
url = 'rest/api/2/issue/{issueIdOrKey}/comment'.format(issueIdOrKey=issue_key)
data = {'body': comment}
if visibility:
data['visibility'] = visibility
return self.post(url, data=data)
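Hypothetical call, assuming jira is an instance of this client; the visibility dict follows the Jira REST API's role/group restriction format:
jira.issue_add_comment(
    'PROJ-123',
    'Deployed build 42 to staging.',
    visibility={'type': 'role', 'value': 'Administrators'},
)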
def on_change(self, path, event_type):
"""Respond to changes in the file system
This method will be given the path to a file that
has changed on disk. We need to reload the keywords
from that file
"""
# I can do all this work in a sql statement, but
# for debugging it's easier to do it in stages.
sql = """SELECT collection_id
FROM collection_table
WHERE path == ?
"""
cursor = self._execute(sql, (path,))
results = cursor.fetchall()
# there should always be exactly one result, but
# there's no harm in using a loop to process the
# single result
for result in results:
collection_id = result[0]
# remove all keywords in this collection
sql = """DELETE from keyword_table
WHERE collection_id == ?
"""
cursor = self._execute(sql, (collection_id,))
self._load_keywords(collection_id, path=path)
def get_full_durable_object(arn, event_time, durable_model):
"""
Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param arn:
:param event_time:
:param durable_model:
:return:
"""
LOG.debug(f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Durable table...')
item = list(durable_model.query(arn, durable_model.eventTime == event_time))
# It is not clear if this would ever be the case... We will consider this an error condition for now.
if not item:
LOG.error(f'[?] Item with ARN/Event Time: {arn}/{event_time} was NOT found in the Durable table...'
f' This is odd.')
raise DurableItemIsMissingException({"item_arn": arn, "event_time": event_time})
# We need to place the real configuration data into the record so it can be deserialized into
# the durable model correctly:
return item[0] | Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param arn:
:param event_time:
:param durable_model:
:return:
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
errors=None):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
errors : {'raise', 'coerce'}, default None
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time). Use ``nonexistent='raise'`` instead.
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
        0   2018-10-28 01:20:00+02:00
        1   2018-10-28 02:36:00+02:00
        2   2018-10-28 03:46:00+01:00
        dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
"""
if errors is not None:
warnings.warn("The errors argument is deprecated and will be "
"removed in a future release. Use "
"nonexistent='NaT' or nonexistent='raise' "
"instead.", FutureWarning)
if errors == 'coerce':
nonexistent = 'NaT'
elif errors == 'raise':
nonexistent = 'raise'
else:
raise ValueError("The errors argument must be either 'coerce' "
"or 'raise'.")
nonexistent_options = ('raise', 'NaT', 'shift_forward',
'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta):
raise ValueError("The nonexistent argument must be one of 'raise',"
" 'NaT', 'shift_forward', 'shift_backward' or"
" a timedelta object")
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC,
self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = conversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent,
)
new_dates = new_dates.view(_NS_DTYPE)
dtype = tz_to_dtype(tz)
return self._simple_new(new_dates, dtype=dtype, freq=self.freq) | Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
errors : {'raise', 'coerce'}, default None
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time). Use ``nonexistent='raise'`` instead.
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0   2018-10-28 01:20:00+02:00
1   2018-10-28 02:36:00+02:00
2   2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw'] |
def to_dict(self):
"""Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset`
"""
rv = {'code': self.code}
if not self.is_native():
rv['issuer'] = self.issuer
rv['type'] = self.type
else:
rv['type'] = 'native'
return rv | Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset` |
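Hedged usage sketch for to_dict(); the import path mirrors older py-stellar-base layouts and the issuer string is a placeholder.
from stellar_base.asset import Asset  # assumed import path for this class

usd = Asset('USD', 'G...ISSUER_ACCOUNT_ID...')  # placeholder issuer account
print(usd.to_dict())
# -> {'code': 'USD', 'issuer': 'G...ISSUER_ACCOUNT_ID...', 'type': <alphanum type>}
# The native asset (XLM, no issuer) would instead collapse to
# {'code': 'XLM', 'type': 'native'}.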
def apply_reactions(self, user):
"""
Set active topics and limits after a response has been triggered
:param user: The user triggering the response
:type user: agentml.User
"""
# User attributes
if self.global_limit:
self._log.info('Enforcing Global Trigger Limit of {num} seconds'.format(num=self.global_limit))
self.agentml.set_limit(self, (time() + self.global_limit), self.glimit_blocking)
if self.user_limit:
self._log.info('Enforcing User Trigger Limit of {num} seconds'.format(num=self.user_limit))
user.set_limit(self, (time() + self.user_limit), self.ulimit_blocking)
for var in self.vars:
var_type, var_name, var_value = var
var_name = ''.join(map(str, var_name)) if isinstance(var_name, Iterable) else var_name
var_value = ''.join(map(str, var_value)) if isinstance(var_value, Iterable) else var_value
# Set a user variable
if var_type == 'user':
self.user.set_var(var_name, var_value)
# Set a global variable
if var_type == 'global':
self.agentml.set_var(var_name, var_value) | Set active topics and limits after a response has been triggered
:param user: The user triggering the response
:type user: agentml.User |
def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match) | Extract js files from the response body |
def unpack_value(format_string, stream):
"""Helper function to unpack struct data from a stream and update the signature verifier.
:param str format_string: Struct format string
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Unpacked values
:rtype: tuple
"""
message_bytes = stream.read(struct.calcsize(format_string))
return struct.unpack(format_string, message_bytes) | Helper function to unpack struct data from a stream and update the signature verifier.
:param str format_string: Struct format string
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Unpacked values
:rtype: tuple |
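A self-contained round trip for unpack_value; it relies only on the struct format semantics shown in the docstring, and assumes the function is importable from its module.
import io
import struct

# from <module> import unpack_value  # exact module path elided in this corpus

payload = struct.pack('>HI', 7, 1024)         # uint16 then uint32, big-endian
stream = io.BytesIO(payload)
assert unpack_value('>H', stream) == (7,)     # consumes calcsize('>H') == 2 bytes
assert unpack_value('>I', stream) == (1024,)  # then the following 4 bytes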
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
from pip.index import Link
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
# ############################################# #
# # Search for archive to fulfill requirement # #
# ############################################# #
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound as exc:
not_found = exc
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.info(
'Requirement already up-to-date: %s',
req_to_install,
)
else:
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
elif install:
if (req_to_install.url
and req_to_install.url.lower().startswith('file:')):
path = url_to_path(req_to_install.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
# NB: This call can result in the creation of a temporary
# build directory
location = req_to_install.build_location(
self.build_dir,
)
unpack = True
url = None
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
if os.path.exists(os.path.join(location, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, location)
)
else:
# FIXME: this won't upgrade when there's an existing
# package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(
req_to_install,
upgrade=self.upgrade,
)
else:
# FIXME: should req_to_install.url already be a
# link?
url = Link(req_to_install.url)
assert url
if url:
try:
if (
url.filename.endswith(wheel_ext)
and self.wheel_download_dir
):
                                # when doing 'pip wheel'
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
unpack_url(
url, location, download_dir,
do_download, session=self.session,
)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, url)
)
else:
unpack = False
if unpack:
is_wheel = url and url.filename.endswith(wheel_ext)
if self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME:https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
req_to_install.assert_source_matches_version()
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
install = False
# ###################### #
# # parse dependencies # #
# ###################### #
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
if is_wheel:
dist = list(
pkg_resources.find_distributions(location)
)[0]
else: # sdists
if req_to_install.satisfied_by:
dist = req_to_install.satisfied_by
else:
dist = req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not self.ignore_dependencies:
for subreq in dist.requires(
req_to_install.extras):
if self.has_requirement(
subreq.project_name):
# FIXME: check for conflict
continue
subreq = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install)
# cleanup tmp src
if (self.is_download or
req_to_install._temp_build_dir is not None):
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install) | Prepare process. Create temp directories, download and/or unpack files. |
def make_serializable(data, mutable=True, key_stringifier=lambda x:x, simplify_midnight_datetime=True):
r"""Make sure the data structure is json serializable (json.dumps-able), all they way down to scalars in nested structures.
If mutable=False then return tuples for all iterables, except basestrings (strs),
so that they can be used as keys in a Mapping (dict).
>>> from collections import OrderedDict
>>> from decimal import Decimal
>>> data = {'x': Decimal('01.234567891113151719'), 'X': [{('y', 'z'): {'q': 'A\xFFB'}}, 'ender'] }
>>> make_serializable(OrderedDict(data)) == {'X': [{('y', 'z'): {'q': 'A\xc3\xbfB'}}, 'ender'], 'x': 1.2345678911131517}
True
>>> make_serializable({'ABCs': list('abc'), datetime.datetime(2014,10,31): datetime.datetime(2014,10,31,23,59,59)}
... ) == {'ABCs': ['2014-10-16 00:00:00', 'b', 'c'], '2014-10-31 00:00:00': '2014-10-31 23:59:59'}
True
"""
# print 'serializabling: ' + repr(data)
# print 'type: ' + repr(type(data))
if isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
if isinstance(data, datetime.datetime):
            if not any((data.hour, data.minute, data.second)):
return datetime.date(data.year, data.month, data.day)
            elif data.year == data.month == data.day == 1:
return datetime.time(data.hour, data.minute, data.second)
return data
# s = unicode(data)
# if s.endswith('00:00:00'):
# return s[:8]
# return s
#print 'nonstring type: ' + repr(type(data))
elif isinstance(data, Model):
if isinstance(data, datetime.datetime):
            if not any((data.hour, data.minute, data.second)):
return datetime.date(data.year, data.month, data.day)
            elif data.year == data.month == data.day == 1:
return datetime.time(data.hour, data.minute, data.second)
return data
elif isinstance(data, Mapping):
mapping = tuple((make_serializable(k, mutable=False, key_stringifier=key_stringifier), make_serializable(v, mutable=mutable)) for (k, v) in data.iteritems())
# print 'mapping tuple = %s' % repr(mapping)
#print 'keys list = %s' % repr([make_serializable(k, mutable=False) for k in data])
# this mutability business is probably unneccessary because the keys of the mapping will already be immutable... at least until python 3 MutableMappings
if mutable:
return dict(mapping)
return mapping
elif hasattr(data, '__iter__'):
if mutable:
#print list(make_serializable(v, mutable=mutable) for v in data)
return list(make_serializable(v, mutable=mutable) for v in data)
else:
#print tuple(make_serializable(v, mutable=mutable) for v in data)
return key_stringifier(tuple(make_serializable(v, mutable=mutable) for v in data))
elif isinstance(data, (float, Decimal)):
return float(data)
elif isinstance(data, basestring):
# Data is either a string or some other object class Django.db.models.Model etc
data = db.clean_utf8(data)
try:
return int(data)
except:
try:
return float(data)
except:
try:
# see if can be coerced into datetime by first coercing to a string
return make_serializable(dateutil.parse(unicode(data)))
except:
try:
# see if can be coerced into a dict (e.g. Dajngo Model or custom user module or class)
return make_serializable(data.__dict__)
except:
# stringify it and give up
return unicode(data) | r"""Make sure the data structure is json serializable (json.dumps-able), all they way down to scalars in nested structures.
If mutable=False then return tuples for all iterables, except basestrings (strs),
so that they can be used as keys in a Mapping (dict).
>>> from collections import OrderedDict
>>> from decimal import Decimal
>>> data = {'x': Decimal('01.234567891113151719'), 'X': [{('y', 'z'): {'q': 'A\xFFB'}}, 'ender'] }
>>> make_serializable(OrderedDict(data)) == {'X': [{('y', 'z'): {'q': 'A\xc3\xbfB'}}, 'ender'], 'x': 1.2345678911131517}
True
>>> make_serializable({'ABCs': list('abc'), datetime.datetime(2014,10,31): datetime.datetime(2014,10,31,23,59,59)}
... ) == {'ABCs': ['2014-10-16 00:00:00', 'b', 'c'], '2014-10-31 00:00:00': '2014-10-31 23:59:59'}
True |
def build_from_token_counts(self, token_counts, min_count, num_iterations=4):
"""Train a SubwordTextTokenizer based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement.
"""
self._init_alphabet_from_tokens(six.iterkeys(token_counts))
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet))
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in xrange(num_iterations):
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(escaped_token) + 1):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its
# prefixes.
new_subtoken_strings = []
for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in xrange(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are
# encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings]) | Train a SubwordTextTokenizer based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement. |
def get(self, buffer_type, offset):
"""Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
"""
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] | Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get |
def update(self, report):
"""
Add the items from the given report.
"""
self.tp.extend(pack_boxes(report.tp, self.title))
self.fp.extend(pack_boxes(report.fp, self.title))
self.fn.extend(pack_boxes(report.fn, self.title)) | Add the items from the given report. |
def plot_hurst_hist():
"""
Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
This function requires the package ``matplotlib``.
"""
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
hs = [nolds.hurst_rs(np.random.random(size=10000), corrected=True) for _ in range(100)]
plt.hist(hs, bins=20)
plt.xlabel("esimated value of hurst exponent")
plt.ylabel("number of experiments")
plt.show() | Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
This function requires the package ``matplotlib``. |
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
"""Enable VXLAN on the switch."""
# Configure the "feature" commands and NVE interface
# (without "member" subcommand configuration).
# The Nexus 9K will not allow the "interface nve" configuration
# until the "feature nv overlay" command is issued and installed.
# To get around the N9K failing on the "interface nve" command
# send the two XML snippets down separately.
starttime = time.time()
# Do CLI 'feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
# Do CLI 'feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
# Do CLI 'int nve1' to Create nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
# Do CLI 'no shut
# source-interface loopback %s'
# beneath int nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
switch=nexus_host) | Enable VXLAN on the switch. |
def delete_repository(self, repository, params=None):
"""
Removes a shared file system repository.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A comma-separated list of repository names
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout
"""
if repository in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'repository'.")
return self.transport.perform_request('DELETE',
_make_path('_snapshot', repository), params=params) | Removes a shared file system repository.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A comma-separated list of repository names
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout |
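Hedged usage sketch: against a running cluster this method is normally reached through the snapshot namespace of the elasticsearch-py client; the host and repository name below are placeholders.
from elasticsearch import Elasticsearch  # assumes elasticsearch-py is installed

es = Elasticsearch(['http://localhost:9200'])
# Remove a previously registered snapshot repository by name.
es.snapshot.delete_repository(repository='my_backup')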
def example_describe_configs(a, args):
""" describe configs """
resources = [ConfigResource(restype, resname) for
restype, resname in zip(args[0::2], args[1::2])]
fs = a.describe_configs(resources)
# Wait for operation to finish.
for res, f in fs.items():
try:
configs = f.result()
for config in iter(configs.values()):
print_config(config, 1)
except KafkaException as e:
print("Failed to describe {}: {}".format(res, e))
except Exception:
raise | describe configs |
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v | Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'. |
def calc_stress_tf(self, lin, lout, damped):
"""Compute the stress transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
            of the layer.
        damped : bool
            If True, scale by the complex shear modulus so that the
            transfer function includes the influence of damping.
        """
tf = self.calc_strain_tf(lin, lout)
if damped:
# Scale by complex shear modulus to include the influence of
# damping
tf *= lout.layer.comp_shear_mod
else:
tf *= lout.layer.shear_mod
return tf | Compute the stress transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
    of the layer.
damped : bool
    If True, scale by the complex shear modulus so that the transfer
    function includes the influence of damping.
def _intertext_score(full_text):
'''returns tuple of scored sentences
in order of appearance
Note: Doing an A/B test to
compare results, reverting to
original algorithm.'''
sentences = sentence_tokenizer(full_text)
norm = _normalize(sentences)
similarity_matrix = pairwise_kernels(norm, metric='cosine')
scores = _textrank(similarity_matrix)
scored_sentences = []
for i, s in enumerate(sentences):
scored_sentences.append((scores[i],i,s))
top_scorers = sorted(scored_sentences,
key=lambda tup: tup[0],
reverse=True)
return top_scorers | returns tuple of scored sentences
in order of appearance
Note: Doing an A/B test to
compare results, reverting to
original algorithm. |
def constrains(self):
"""
returns a list of parameters that are constrained by this parameter
"""
params = []
for constraint in self.in_constraints:
for var in constraint._vars:
param = var.get_parameter()
if param.component == constraint.component and param.qualifier == constraint.qualifier:
if param not in params and param.uniqueid != self.uniqueid:
params.append(param)
return params | returns a list of parameters that are constrained by this parameter |
def check_ns_run_threads(run):
"""Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
"""
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
'Run should have at least one thread which starts by sampling the ' +
'whole prior')
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
'First point in thread has logl less than thread min logl! ' +
th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
'Last point in thread logl != thread end logl! ' + th_info) | Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties. |
def get_appium_sessionId(self):
"""Returns the current session ID as a reference"""
self._info("Appium Session ID: " + self._current_application().session_id)
return self._current_application().session_id | Returns the current session ID as a reference |
def git_pull(repo_dir, remote="origin", ref=None, update_head_ok=False):
"""Do a git pull of `ref` from `remote`."""
command = ['git', 'pull']
if update_head_ok:
command.append('--update-head-ok')
command.append(pipes.quote(remote))
if ref:
command.append(ref)
return execute_git_command(command, repo_dir=repo_dir) | Do a git pull of `ref` from `remote`. |
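A minimal call sketch, assuming git_pull is importable from its module; the repository path and ref are placeholders.
# Fast-forward the checkout at /tmp/myrepo from origin/main.
git_pull('/tmp/myrepo', remote='origin', ref='main')

# Pull the currently tracked branch, allowing HEAD itself to be updated.
git_pull('/tmp/myrepo', update_head_ok=True)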
def encode(locations):
"""
    :param locations: locations list containing (lat, lon) two-tuples
:return: encoded polyline string
"""
encoded = (
(_encode_value(lat, prev_lat), _encode_value(lon, prev_lon))
for (prev_lat, prev_lon), (lat, lon)
in _iterate_with_previous(locations, first=(0, 0))
)
encoded = chain.from_iterable(encoded)
return ''.join(c for r in encoded for c in r) | :param locations: locations list containig (lat, lon) two-tuples
:return: encoded polyline string |
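If _encode_value follows the standard Google polyline scheme at 1e-5 precision, the canonical example from Google's documentation should encode as shown in the comment (expected string stated as an assumption, not verified here).
locations = [(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]
encoded = encode(locations)  # encode() assumed importable from this module
# Expected under the standard scheme: '_p~iF~ps|U_ulLnnqC_mqNvxq`@'
print(encoded)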
def confd_state_internal_callpoints_typepoint_registration_type_range_range_daemon_error(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
callpoints = ET.SubElement(internal, "callpoints")
typepoint = ET.SubElement(callpoints, "typepoint")
id_key = ET.SubElement(typepoint, "id")
id_key.text = kwargs.pop('id')
registration_type = ET.SubElement(typepoint, "registration-type")
range = ET.SubElement(registration_type, "range")
range = ET.SubElement(range, "range")
daemon = ET.SubElement(range, "daemon")
error = ET.SubElement(daemon, "error")
error.text = kwargs.pop('error')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _get_file_by_alias(part, files):
""" Given a command part, find the file it represents. If not found,
then returns a new token representing that file.
:throws ValueError: if the value is not a command file alias.
"""
# Make Output
if _is_output(part):
return Output.from_string(part.pop())
# Search/Make Input
else:
inputs = [[]]
if part.magic_or:
and_or = 'or'
else:
and_or = 'and'
for cut in part.asList():
if cut == OR_TOKEN:
inputs.append([])
continue
if cut == AND_TOKEN:
continue
input = Input(cut, filename=cut, and_or=and_or)
for file in files:
if file.alias == cut:
# Override the filename
input.filename = file.filename
inputs[-1].append(input)
break
else:
inputs[-1].append(input)
return [input for input in inputs if input] | Given a command part, find the file it represents. If not found,
then returns a new token representing that file.
:throws ValueError: if the value is not a command file alias. |
def averageConvergencePoint(self, prefix, minOverlap, maxOverlap,
settlingTime=1, firstStat=0, lastStat=None):
"""
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time and accuracy across all objects.
Using inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration and accuracy across
all runs.
:param prefix: Use this prefix to filter relevant stats.
:param minOverlap: Min target overlap
:param maxOverlap: Max target overlap
    :param settlingTime: Settling time between iterations. Default 1
:return: Average settling iteration and accuracy across all runs
"""
convergenceSum = 0.0
numCorrect = 0.0
inferenceLength = 1000000
# For each object
for stats in self.statistics[firstStat:lastStat]:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
inferenceLength = len(stats[key])
columnConvergence = L4L2Experiment._locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint) / settlingTime)
if ceil(float(convergencePoint) / settlingTime) <= inferenceLength:
numCorrect += 1
if len(self.statistics[firstStat:lastStat]) == 0:
return 10000.0, 0.0
return (convergenceSum / len(self.statistics[firstStat:lastStat]),
numCorrect / len(self.statistics[firstStat:lastStat]) ) | For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time and accuracy across all objects.
Using inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration and accuracy across
all runs.
:param prefix: Use this prefix to filter relevant stats.
:param minOverlap: Min target overlap
:param maxOverlap: Max target overlap
:param settlingTime: Settling time between iterations. Default 1
:return: Average settling iteration and accuracy across all runs |
def get_orm_column_names(cls: Type, sort: bool = False) -> List[str]:
"""
Gets column names (that is, database column names) from an SQLAlchemy
ORM class.
"""
colnames = [col.name for col in get_orm_columns(cls)]
return sorted(colnames) if sort else colnames | Gets column names (that is, database column names) from an SQLAlchemy
ORM class. |
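A small usage sketch with a throwaway declarative model; it assumes get_orm_column_names (and its get_orm_columns helper) are importable.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Person(Base):
    __tablename__ = 'person'
    pk = Column('person_id', Integer, primary_key=True)  # attribute and column names differ
    surname = Column(String(50))


# Database column names, optionally sorted alphabetically.
print(get_orm_column_names(Person))             # e.g. ['person_id', 'surname']
print(get_orm_column_names(Person, sort=True))  # ['person_id', 'surname']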
def expr(args):
"""
%prog expr block exp layout napus.bed
Plot a composite figure showing synteny and the expression level between
homeologs in two tissues - total 4 lists of values. block file contains the
gene pairs between AN and CN.
"""
from jcvi.graphics.base import red_purple as default_cm
p = OptionParser(expr.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 4:
sys.exit(not p.print_help())
block, exp, layout, napusbed = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
s = Synteny(fig, root, block, napusbed, layout)
# Import the expression values
# Columns are: leaf-A, leaf-C, root-A, root-C
fp = open(exp)
data = {}
for row in fp:
gid, lf, rt = row.split()
lf, rt = float(lf), float(rt)
data[gid] = (lf, rt)
rA, rB = s.rr
gA = [x.accn for x in rA.genes]
gC = [x.accn for x in rB.genes]
A = [data.get(x, (0, 0)) for x in gA]
C = [data.get(x, (0, 0)) for x in gC]
A = np.array(A)
C = np.array(C)
A = np.transpose(A)
C = np.transpose(C)
d, h = .01, .1
lsg = "lightslategrey"
coords = s.gg # Coordinates of the genes
axes = []
for j, (y, gg) in enumerate(((.79, gA), (.24, gC))):
r = s.rr[j]
x = r.xstart
w = r.xend - r.xstart
ax = fig.add_axes([x, y, w, h])
axes.append(ax)
root.add_patch(Rectangle((x - h, y - d), w + h + d, h + 2 * d, fill=False,
ec=lsg, lw=1))
root.text(x - d, y + 3 * h / 4, "root", ha="right", va="center")
root.text(x - d, y + h / 4, "leaf", ha="right", va="center")
ty = y - 2 * d if y > .5 else y + h + 2 * d
nrows = len(gg)
for i, g in enumerate(gg):
start, end = coords[(j, g)]
sx, sy = start
ex, ey = end
assert sy == ey
sy = sy + 2 * d if sy > .5 else sy - 2 * d
root.plot(((sx + ex) / 2, x + w * (i + .5) / nrows), (sy, ty),
lw=1, ls=":", color="k", alpha=.2)
axA, axC = axes
p = axA.pcolormesh(A, cmap=default_cm)
p = axC.pcolormesh(C, cmap=default_cm)
axA.set_xlim(0, len(gA))
axC.set_xlim(0, len(gC))
x, y, w, h = .35, .1, .3, .05
ax_colorbar = fig.add_axes([x, y, w, h])
fig.colorbar(p, cax=ax_colorbar, orientation='horizontal')
root.text(x - d, y + h / 2, "RPKM", ha="right", va="center")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
for x in (axA, axC, root):
x.set_axis_off()
image_name = "napusf4b." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog expr block exp layout napus.bed
Plot a composite figure showing synteny and the expression level between
homeologs in two tissues - total 4 lists of values. block file contains the
gene pairs between AN and CN. |
def area(self):
r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`.
"""
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
edge1, edge2, edge3 = self._get_edges()
return _surface_helpers.compute_area(
(edge1._nodes, edge2._nodes, edge3._nodes)
) | r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the B |eacute| zier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`. |
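For straight edges the Green's-theorem identity above reduces to the shoelace formula; a library-free check on a plain triangle (illustrative only, not the bezier API):
def shoelace_area(vertices):
    """Polygon area via 0.5 * sum(x_i * y_j - x_j * y_i) over consecutive vertices."""
    total = 0.0
    for (x0, y0), (x1, y1) in zip(vertices, vertices[1:] + vertices[:1]):
        total += x0 * y1 - x1 * y0
    return 0.5 * total


# The unit (reference) triangle has area 1/2.
assert abs(shoelace_area([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]) - 0.5) < 1e-12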
def crypto_box_seal_open(ciphertext, pk, sk):
"""
Decrypts and returns an encrypted message ``ciphertext``, using the
    recipient's secret key ``sk`` and the sender's ephemeral public key
    embedded in the sealed box. The box construct nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2
"""
ensure(isinstance(ciphertext, bytes),
"input ciphertext must be bytes",
raising=TypeError)
ensure(isinstance(pk, bytes),
"public key must be bytes",
raising=TypeError)
ensure(isinstance(sk, bytes),
"secret key must be bytes",
raising=TypeError)
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError("Invalid public key")
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError("Invalid secret key")
_clen = len(ciphertext)
ensure(_clen >= crypto_box_SEALBYTES,
("Input cyphertext must be "
"at least {} long").format(crypto_box_SEALBYTES),
raising=exc.TypeError)
_mlen = _clen - crypto_box_SEALBYTES
    # zero-length malloc results are implementation-dependent
plaintext = ffi.new("unsigned char[]", max(1, _mlen))
res = lib.crypto_box_seal_open(plaintext, ciphertext, _clen, pk, sk)
ensure(res == 0, "An error occurred trying to decrypt the message",
raising=exc.CryptoError)
return ffi.buffer(plaintext, _mlen)[:] | Decrypts and returns an encrypted message ``ciphertext``, using the
recipient's secret key ``sk`` and the sender's ephemeral public key
embedded in the sealed box. The box construct nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2 |
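Round-trip sketch using the sibling low-level bindings; it assumes PyNaCl >= 1.2, where crypto_box_keypair and crypto_box_seal live alongside this function.
from nacl.bindings import (crypto_box_keypair, crypto_box_seal,
                           crypto_box_seal_open)

pk, sk = crypto_box_keypair()                    # recipient key pair
sealed = crypto_box_seal(b'attack at dawn', pk)  # anonymous sender
assert crypto_box_seal_open(sealed, pk, sk) == b'attack at dawn'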
def _get_query(self, order=None, filters=None):
""" This method is just to evade code duplication in count() and get_content, since they do basically the same thing"""
order = self._get_order(order)
return self.posts.all().order_by(order) | This method is just to evade code duplication in count() and get_content, since they do basically the same thing |
def _value_iterator(self, task_name, param_name):
"""
Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be whatever non-_no_value that is yielded first.
"""
cp_parser = CmdlineParser.get_instance()
if cp_parser:
dest = self._parser_global_dest(param_name, task_name)
found = getattr(cp_parser.known_args, dest, None)
yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
if self._config_path:
yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
self._config_path['section'], self._config_path['name'], task_name, param_name))
yield (self._default, None) | Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be whatever non-_no_value that is yielded first. |
def enumerate_global_imports(tokens):
"""
Returns a list of all globally imported modules (skips modules imported
inside of classes, methods, or functions). Example::
    >>> enumerate_global_imports(tokens)
['sys', 'os', 'tokenize', 're']
.. note::
Does not enumerate imports using the 'from' or 'as' keywords.
"""
imported_modules = []
import_line = False
from_import = False
parent_module = ""
function_count = 0
indentation = 0
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.INDENT:
indentation += 1
elif token_type == tokenize.DEDENT:
indentation -= 1
elif token_type == tokenize.NEWLINE:
import_line = False
from_import = False
elif token_type == tokenize.NAME:
if token_string in ["def", "class"]:
function_count += 1
if indentation == function_count - 1:
function_count -= 1
elif function_count >= indentation:
if token_string == "import":
import_line = True
elif token_string == "from":
from_import = True
elif import_line:
if token_type == tokenize.NAME \
and tokens[index+1][1] != 'as':
if not from_import \
and token_string not in reserved_words:
if token_string not in imported_modules:
if tokens[index+1][1] == '.': # module.module
parent_module = token_string + '.'
else:
if parent_module:
module_string = (
parent_module + token_string)
imported_modules.append(module_string)
parent_module = ''
else:
imported_modules.append(token_string)
return imported_modules | Returns a list of all globally imported modules (skips modules imported
inside of classes, methods, or functions). Example::
>>> enumerate_global_imports(tokens)
['sys', 'os', 'tokenize', 're']
.. note::
Does not enumerate imports using the 'from' or 'as' keywords. |
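A quick, self-contained exercise of the scanner above using the tokenize module; it assumes enumerate_global_imports is importable and that its module-level reserved_words list is in place.
import io
import tokenize

source = (
    "import os\n"
    "import xml.etree\n"
    "def f():\n"
    "    import json  # local import, skipped\n"
)
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(enumerate_global_imports(tokens))  # expected roughly: ['os', 'xml.etree']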
def chdir(self,
path,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None,
loglevel=logging.DEBUG):
"""How to change directory will depend on whether we are in delivery mode bash or docker.
        @param path: Path to change directory to.
@param timeout: Timeout on response
@param note: See send()
"""
shutit = self.shutit
shutit.handle_note(note, 'Changing to path: ' + path)
shutit.log('Changing directory to path: "' + path + '"', level=logging.DEBUG)
if shutit.build['delivery'] in ('bash','dockerfile'):
self.send(ShutItSendSpec(self,
send=' command cd "' + path + '"',
timeout=timeout,
echo=False,
loglevel=loglevel))
elif shutit.build['delivery'] in ('docker',):
os.chdir(path)
else:
shutit.fail('chdir not supported for delivery method: ' + str(shutit.build['delivery'])) # pragma: no cover
shutit.handle_note_after(note=note)
return True | How to change directory will depend on whether we are in delivery mode bash or docker.
@param path: Path to change directory to.
@param timeout: Timeout on response
@param note: See send() |
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
random_state=None, verbose=None):
"""Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
    weights : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
        Note. Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
        Whether to use the extended infomax algorithm or not. Defaults to
        False.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
rng = check_random_state(random_state)
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 20
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
    logger.info('computing%sInfomax ICA' % (' Extended ' if extended is True
                                            else ' '))
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
        signs.flat[slice(0, n_features * n_subgauss, n_features)] *= -1  # mark sub-Gaussian components
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# trainings loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
permute = list(range(n_samples))
rng.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
                # keep current delta/change to compare against the next update
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
                # no-op in the original; -1 (sub-gaussian flag) is the assumed intent
                signs.flat[slice(0, n_features * n_subgauss, n_features)] = -1
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g'
'\n... re-starting...' % l_rate)
else:
                raise ValueError('Error in Infomax ICA: unmixing_matrix might '
                                 'not be invertible!')
# prepare return values
return weights.T | Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
    Note: smaller learning rates will slow down the procedure.
    Defaults to 0.01 / log(n_features ** 2.0).
block : int
The block size of randomly chosen data segment.
    Defaults to floor(sqrt(n_samples / 3.0)).
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
    The angle (in degrees) at which the learning rate will be reduced.
    Defaults to 60.0.
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
    Whether to use the extended Infomax algorithm. Defaults to True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator. |
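The learning-rate annealing rule inside the training loop above (l_rate *= anneal_step whenever the angle between successive weight updates exceeds anneal_deg) can be exercised in isolation. A minimal sketch with illustrative random updates; the helper name ``anneal`` is hypothetical and not part of the original module.
import math
import numpy as np

def anneal(l_rate, delta, olddelta, change, oldchange,
           anneal_deg=60.0, anneal_step=0.9):
    # angle (in degrees) between the current and previous weight update
    angledelta = math.degrees(
        math.acos(np.sum(delta * olddelta) / math.sqrt(change * oldchange)))
    if angledelta > anneal_deg:
        l_rate *= anneal_step   # anneal exactly as in the loop above
    return l_rate, angledelta

rng = np.random.RandomState(0)
delta, olddelta = rng.randn(1, 9), rng.randn(1, 9)
change, oldchange = np.sum(delta * delta), np.sum(olddelta * olddelta)
print(anneal(0.01, delta, olddelta, change, oldchange))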
def from_iter(cls, data, name=None):
"""Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
"""
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
for k, v in sorted(data.items())]
return cls(name, values=values) | Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``. |
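The long-format records built by from_iter are easy to inspect on their own; a minimal sketch of the same list-to-records transformation, outside the class:
# list -> {index: value} -> [{'idx': ..., 'col': 'data', 'val': ...}, ...]
data = [10, 20, 30]
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
          for k, v in sorted(data.items())]
print(values)
# [{'idx': 0, 'col': 'data', 'val': 10}, {'idx': 1, 'col': 'data', 'val': 20},
#  {'idx': 2, 'col': 'data', 'val': 30}]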
def astype(self, dtype, undefined_on_failure=False):
"""
Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to turicreate.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = turicreate.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = turicreate.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
"""
if (dtype == _Image) and (self.dtype == array.array):
raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.pixel_array_to_image() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure)) | Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to turicreate.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = turicreate.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = turicreate.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}] |
def convert_svhn(which_format, directory, output_directory,
output_filename=None):
"""Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if which_format not in (1, 2):
raise ValueError("SVHN format needs to be either 1 or 2.")
if not output_filename:
output_filename = 'svhn_format_{}.hdf5'.format(which_format)
if which_format == 1:
return convert_svhn_format_1(
directory, output_directory, output_filename)
else:
return convert_svhn_format_2(
directory, output_directory, output_filename) | Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset. |
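A hedged usage sketch: the directory paths below are placeholders, and the raw SVHN files are assumed to already be present in the input directory.
# Format 2 converts the cropped-digits variant of SVHN.
output_paths = convert_svhn(2, directory='./raw_svhn',
                            output_directory='./data')
print(output_paths)   # expected: a single-element tuple ending in 'svhn_format_2.hdf5'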
def flesh_out(X, W, embed_dim, CC_labels, dist_mult=2.0, angle_thresh=0.2,
min_shortcircuit=4, max_degree=5, verbose=False):
'''Given a connected graph adj matrix (W), add edges to flesh it out.'''
W = W.astype(bool)
assert np.all(W == W.T), 'graph given to flesh_out must be symmetric'
D = pairwise_distances(X, metric='sqeuclidean')
# compute average edge lengths for each point
avg_edge_length = np.empty(X.shape[0])
for i,nbr_mask in enumerate(W):
avg_edge_length[i] = D[i,nbr_mask].mean()
# candidate edges must satisfy edge length for at least one end point
dist_thresh = dist_mult * avg_edge_length
dist_mask = (D < dist_thresh) | (D < dist_thresh[:,None])
# candidate edges must connect points >= min_shortcircuit hops away
hops_mask = np.isinf(dijkstra(W, unweighted=True, limit=min_shortcircuit-1))
# candidate edges must not already be connected, or in the same initial CC
CC_mask = CC_labels != CC_labels[:,None]
candidate_edges = ~W & dist_mask & hops_mask & CC_mask
if verbose: # pragma: no cover
print('before F:', candidate_edges.sum(), 'potentials')
# calc subspaces
subspaces, _ = cluster_subspaces(X, embed_dim, CC_labels.max()+1, CC_labels)
# upper triangular avoids p,q <-> q,p repeats
ii,jj = np.where(np.triu(candidate_edges))
# Get angles
edge_dirs = X[ii] - X[jj]
ssi = subspaces[CC_labels[ii]]
ssj = subspaces[CC_labels[jj]]
F = edge_cluster_angle(edge_dirs, ssi, ssj)
mask = F < angle_thresh
edge_ii = ii[mask]
edge_jj = jj[mask]
edge_order = np.argsort(F[mask])
if verbose: # pragma: no cover
print('got', len(edge_ii), 'potential edges')
# Prevent any one node from getting a really high degree
degree = W.sum(axis=0)
sorted_edges = np.column_stack((edge_ii, edge_jj))[edge_order]
for e in sorted_edges:
if degree[e].max() < max_degree:
W[e[0],e[1]] = True
W[e[1],e[0]] = True
degree[e] += 1
return Graph.from_adj_matrix(np.where(W, np.sqrt(D), 0)) | Given a connected graph adj matrix (W), add edges to flesh it out. |
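The distance gate in the candidate-edge construction above applies a per-point threshold from either endpoint; OR-ing the row-wise and column-wise comparisons makes the mask symmetric. A small self-contained illustration on toy points (the per-point average edge length is replaced by a simple stand-in):
import numpy as np
from sklearn.metrics import pairwise_distances

X = np.random.RandomState(0).randn(5, 2)
D = pairwise_distances(X, metric='sqeuclidean')
avg_edge_length = D.mean(axis=1)          # stand-in for the neighbor-only average
dist_thresh = 2.0 * avg_edge_length
# an edge (i, j) passes if it is short enough for i *or* for j
dist_mask = (D < dist_thresh) | (D < dist_thresh[:, None])
assert np.all(dist_mask == dist_mask.T)   # the OR makes the mask symmetric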
def compare(orderby_item1, orderby_item2):
"""compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int
"""
type1_ord = _OrderByHelper.getTypeOrd(orderby_item1)
type2_ord = _OrderByHelper.getTypeOrd(orderby_item2)
type_ord_diff = type1_ord - type2_ord
if type_ord_diff:
return type_ord_diff
# the same type,
if type1_ord == 0:
return 0
return _compare_helper(orderby_item1['item'], orderby_item2['item']) | compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int |
def local_fehdist(feh):
"""feh PDF based on local SDSS distribution
From Jo Bovy:
https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3
2D gaussian fit based on Casagrande (2011)
"""
fehdist= 0.8/0.15*np.exp(-0.5*(feh-0.016)**2./0.15**2.)\
+0.2/0.22*np.exp(-0.5*(feh+0.15)**2./0.22**2.)
return fehdist | feh PDF based on local SDSS distribution
From Jo Bovy:
https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3
2D gaussian fit based on Casagrande (2011) |
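Written out, the density evaluated by local_fehdist is the two-component mixture below (constants taken directly from the code):
p(\mathrm{[Fe/H]}) = \frac{0.8}{0.15}\, e^{-\frac{(\mathrm{[Fe/H]} - 0.016)^2}{2 \cdot 0.15^2}} + \frac{0.2}{0.22}\, e^{-\frac{(\mathrm{[Fe/H]} + 0.15)^2}{2 \cdot 0.22^2}}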
def set_status(self, action, target):
"""
Sets query status with format: "{domain} ({action}) {target}"
"""
try:
target = unquote(target)
except (AttributeError, TypeError):
pass
status = "%s (%s) %s" % (self.domain, action, target)
status = status.strip().replace('\n', '')
if len(status) >= self.MAXWIDTH:
tail = '...'
extent = self.MAXWIDTH - (len(tail) + self.RPAD)
self.status = status[:extent] + tail
else:
self.status = status | Sets query status with format: "{domain} ({action}) {target}" |
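The truncation rule can be checked on its own; MAXWIDTH and RPAD below are illustrative stand-ins for the class attributes used above.
MAXWIDTH, RPAD = 40, 2
status = "example.com (GET) /a/very/long/path/that/will/not/fit/on/screen"
if len(status) >= MAXWIDTH:
    tail = '...'
    extent = MAXWIDTH - (len(tail) + RPAD)
    status = status[:extent] + tail
print(status)   # 'example.com (GET) /a/very/long/path...'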
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
""" Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection
"""
if access_key:
# Connect using supplied credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
# Fetch instance metadata
metadata = get_instance_metadata(timeout=1, num_retries=1)
if metadata:
try:
region = metadata['placement']['availability-zone'][:-1]
except KeyError:
pass
# Connect using env vars or boto credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(region)
if not connection:
logger.error('An error occurred when connecting to EC2')
sys.exit(1)
return connection | Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection |
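Hedged usage sketches; the region and credentials below are placeholders, not real keys.
# Explicit credentials (placeholders)
conn = connect_to_ec2('eu-west-1',
                      access_key='AKIAEXAMPLE',
                      secret_key='example-secret')
# Or fall back to instance metadata / boto configuration
conn = connect_to_ec2()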
def raise_205(instance):
"""Abort the current request with a 205 (Reset Content) response code.
Clears out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 205
"""
instance.response.status = 205
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) | Abort the current request with a 205 (Reset Content) response code.
Clears out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 205 |