code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
parsed = urlparse(uri)
default_user = get_current_user()
password = unquote(parsed.password) if parsed.password else None
kwargs = {'host': parsed.hostname,
'port': parsed.port,
'dbname': parsed.path[1:] or default_user,
'user': parsed.username or default_user,
'password': password}
values = parse_qs(parsed.query)
if 'host' in values:
kwargs['host'] = values['host'][0]
for k in [k for k in values if k in KEYWORDS]:
kwargs[k] = values[k][0] if len(values[k]) == 1 else values[k]
try:
if kwargs[k].isdigit():
kwargs[k] = int(kwargs[k])
except AttributeError:
pass
return kwargs | def uri_to_kwargs(uri) | Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
applying default values for non-specified areas of the URI.
:param str uri: The connection URI
:rtype: dict | 2.350061 | 2.426436 | 0.968524 |
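A quick usage sketch for the row above; it assumes the function ships as `queries.utils.uri_to_kwargs` and the connection URI is made up:

```python
# Illustrative only: assumes the queries package provides this function as
# queries.utils.uri_to_kwargs; the URI below is made up.
from queries import utils

kwargs = utils.uri_to_kwargs('postgresql://scott:tiger@db.example.com:5433/mydb')
# Expected shape (exact values depend on the URI and the current user):
# {'host': 'db.example.com', 'port': 5433, 'dbname': 'mydb',
#  'user': 'scott', 'password': 'tiger'}
```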
value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url
parsed = _urlparse.urlparse(value)
path, query = parsed.path, parsed.query
hostname = parsed.hostname if parsed.hostname else ''
return PARSED(parsed.scheme.replace('http', 'postgresql'),
parsed.netloc,
path,
parsed.params,
query,
parsed.fragment,
parsed.username,
parsed.password,
hostname.replace('%2f', '/'),
parsed.port) | def urlparse(url) | Parse the URL in a Python2/3 independent fashion.
:param str url: The URL to parse
:rtype: Parsed | 3.465995 | 3.988016 | 0.869103 |
self._freed = True
self._cleanup(self.cursor, self._fd) | def free(self) | Release the results and connection lock from the TornadoSession
object. This **must** be called after you finish processing the results
from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or
:py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>`
or the connection will not be able to be reused by other asynchronous
requests. | 17.509333 | 18.454742 | 0.948771 |
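The `free()` requirement above is easiest to see in context; this is a hedged sketch of the documented `queries.TornadoSession` usage pattern, with a made-up connection URI:

```python
# Sketch of the documented queries.TornadoSession usage pattern; free()
# releases the connection lock once the results have been processed.
from tornado import gen, ioloop
import queries

session = queries.TornadoSession('postgresql://postgres@localhost:5432/postgres')

@gen.coroutine
def print_version():
    results = yield session.query('SELECT version()')
    for row in results:
        print(row)
    results.free()  # required so the connection can be reused

if __name__ == '__main__':
    ioloop.IOLoop.current().run_sync(print_version)
```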
if self.pid not in self._pool_manager:
self._pool_manager.create(self.pid, self._pool_idle_ttl,
self._pool_max_size, self._ioloop.time) | def _ensure_pool_exists(self) | Create the pool in the pool manager if it does not exist. | 6.306632 | 4.699572 | 1.341959 |
future = concurrent.Future()
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
self._connections[connection.fileno()] = connection
future.set_result(connection)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE)
except pool.NoIdleConnectionsError:
self._create_connection(future)
return future | def _connect(self) | Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating the new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError | 4.911685 | 4.113703 | 1.193981 |
LOGGER.debug('Creating a new connection for %s', self.pid)
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
try:
connection = self._psycopg2_connect(kwargs)
except (psycopg2.Error, OSError, socket.error) as error:
future.set_exception(error)
return
# Add the connection for use in _poll_connection
fd = connection.fileno()
self._connections[fd] = connection
def on_connected(cf):
if cf.exception():
self._cleanup_fd(fd, True)
future.set_exception(cf.exception())
else:
try:
# Add the connection to the pool
LOGGER.debug('Connection established for %s', self.pid)
self._pool_manager.add(self.pid, connection)
except (ValueError, pool.PoolException) as err:
LOGGER.exception('Failed to add %r to the pool', self.pid)
self._cleanup_fd(fd)
future.set_exception(err)
return
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2cffi connects and leaves the
# connection in a weird state: consts.STATUS_DATESTYLE,
# returning from Connection._setup without setting the state
# as const.STATUS_OK
if utils.PYPY:
connection.status = extensions.STATUS_READY
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
# Set the future result
future.set_result(connection)
# Add a future that fires once connected
self._futures[fd] = concurrent.Future()
self._ioloop.add_future(self._futures[fd], on_connected)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE) | def _create_connection(self, future) | Create a new PostgreSQL connection
:param tornado.concurrent.Future future: future for new conn result | 4.434087 | 4.421162 | 1.002923 |
future = concurrent.Future()
def on_connected(cf):
if cf.exception():
future.set_exception(cf.exception())
return
# Get the psycopg2 connection object and cursor
conn = cf.result()
cursor = self._get_cursor(conn)
def completed(qf):
if qf.exception():
self._incr_exceptions(conn)
err = qf.exception()
LOGGER.debug('Cleaning cursor due to exception: %r', err)
self._exec_cleanup(cursor, conn.fileno())
future.set_exception(err)
else:
self._incr_executions(conn)
value = Results(cursor, self._exec_cleanup, conn.fileno())
future.set_result(value)
# Setup a callback to wait on the query result
self._futures[conn.fileno()] = concurrent.Future()
# Add the future to the IOLoop
self._ioloop.add_future(self._futures[conn.fileno()],
completed)
# Get the cursor, execute the query
func = getattr(cursor, method)
try:
func(query, parameters)
except Exception as error:
future.set_exception(error)
# Ensure the pool exists for the connection
self._ensure_pool_exists()
# Grab a connection to PostgreSQL
self._ioloop.add_future(self._connect(), on_connected)
# Return the future for the query result
return future | def _execute(self, method, query, parameters=None) | Issue a query asynchronously on the server, mogrifying the
parameters against the sql statement and yielding the results
as a :py:class:`Results <queries.tornado_session.Results>` object.
This function reduces duplicate code for callproc and query by getting
the class attribute for the method passed in as the function to call.
:param str method: The method attribute to use
:param str query: The SQL statement or Stored Procedure name
:param list|dict parameters: A dictionary of query parameters
:rtype: Results
:raises: queries.DataError
:raises: queries.DatabaseError
:raises: queries.IntegrityError
:raises: queries.InternalError
:raises: queries.InterfaceError
:raises: queries.NotSupportedError
:raises: queries.OperationalError
:raises: queries.ProgrammingError | 4.039534 | 3.96846 | 1.01791 |
LOGGER.debug('Closing cursor and cleaning %s', fd)
try:
cursor.close()
except (psycopg2.Error, psycopg2.Warning) as error:
LOGGER.debug('Error closing the cursor: %s', error)
self._cleanup_fd(fd)
# If the cleanup callback exists, remove it
if self._cleanup_callback:
self._ioloop.remove_timeout(self._cleanup_callback)
# Create a new cleanup callback to clean the pool of idle connections
self._cleanup_callback = self._ioloop.add_timeout(
self._ioloop.time() + self._pool_idle_ttl + 1,
self._pool_manager.clean, self.pid) | def _exec_cleanup(self, cursor, fd) | Close the cursor, remove any references to the fd in internal state
and remove the fd from the ioloop.
:param psycopg2.extensions.cursor cursor: The cursor to close
:param int fd: The connection file descriptor | 3.880496 | 3.763165 | 1.031179 |
self._ioloop.remove_handler(fd)
if fd in self._connections:
try:
self._pool_manager.free(self.pid, self._connections[fd])
except pool.ConnectionNotFoundError:
pass
if close:
self._connections[fd].close()
del self._connections[fd]
if fd in self._futures:
del self._futures[fd] | def _cleanup_fd(self, fd, close=False) | Ensure the socket is removed from the IOLoop, the
connection stack, and futures stack.
:param int fd: The fd # to cleanup | 3.421891 | 3.297422 | 1.037748 |
self._pool_manager.get_connection(self.pid, conn).exceptions += 1 | def _incr_exceptions(self, conn) | Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection | 15.214023 | 19.648876 | 0.774295 |
self._pool_manager.get_connection(self.pid, conn).executions += 1 | def _incr_executions(self, conn) | Increment the number of executions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection | 16.988785 | 21.24884 | 0.799516 |
if fd not in self._connections:
LOGGER.warning('Received IO event for non-existing connection')
return
self._poll_connection(fd) | def _on_io_events(self, fd=None, _events=None) | Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised | 6.705866 | 9.816114 | 0.683149 |
try:
state = self._connections[fd].poll()
except (OSError, socket.error) as error:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.OperationalError('Connection error (%s)' % error)
)
except (psycopg2.Error, psycopg2.Warning) as error:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(error)
else:
if state == extensions.POLL_OK:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_result(True)
elif state == extensions.POLL_WRITE:
self._ioloop.update_handler(fd, ioloop.IOLoop.WRITE)
elif state == extensions.POLL_READ:
self._ioloop.update_handler(fd, ioloop.IOLoop.READ)
elif state == extensions.POLL_ERROR:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.Error('Poll Error')) | def _poll_connection(self, fd) | Check with psycopg2 to see what action to take. If the state is
POLL_OK, we should have a pending callback for that fd.
:param int fd: The socket fd for the postgresql connection | 1.883603 | 1.879013 | 1.002443 |
import codecs
setuptools.setup(
name='wcwidth',
version='0.1.7',
description=("Measures number of Terminal column cells "
"of wide-character codes"),
long_description=codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(),
author='Jeff Quast',
author_email='[email protected]',
license='MIT',
packages=['wcwidth', 'wcwidth.tests'],
url='https://github.com/jquast/wcwidth',
include_package_data=True,
test_suite='wcwidth.tests',
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Localization',
'Topic :: Software Development :: Internationalization',
'Topic :: Terminals'
],
keywords=['terminal', 'emulator', 'wcwidth', 'wcswidth', 'cjk',
'combining', 'xterm', 'console', ],
cmdclass={'update': SetupUpdate},
) | def main() | Setup.py entry point. | 2.912986 | 2.857394 | 1.019455 |
import codecs
import glob
# read in,
data_in = codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read()
# search for beginning and end positions,
pos_begin = data_in.find(self.README_PATCH_FROM)
assert pos_begin != -1, (pos_begin, self.README_PATCH_FROM)
pos_begin += len(self.README_PATCH_FROM)
pos_end = data_in.find(self.README_PATCH_TO)
assert pos_end != -1, (pos_end, self.README_PATCH_TO)
glob_pattern = os.path.join(HERE, 'data', '*.txt')
file_descriptions = [
self._describe_file_header(fpath)
for fpath in glob.glob(glob_pattern)]
# patch,
data_out = (
data_in[:pos_begin] +
'\n\n' +
'\n'.join(file_descriptions) +
'\n\n' +
data_in[pos_end:]
)
# write.
print("patching {} ..".format(self.README_RST))
codecs.open(
self.README_RST, 'w', 'utf8').write(data_out) | def _do_readme_update(self) | Patch README.rst to reflect the data files used in release. | 2.741024 | 2.594478 | 1.056484 |
self._do_retrieve(self.EAW_URL, self.EAW_IN)
(version, date, values) = self._parse_east_asian(
fname=self.EAW_IN,
properties=(u'W', u'F',)
)
table = self._make_table(values)
self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table) | def _do_east_asian(self) | Fetch and update east-asian tables. | 6.576388 | 5.819427 | 1.130075 |
self._do_retrieve(self.UCD_URL, self.UCD_IN)
(version, date, values) = self._parse_category(
fname=self.UCD_IN,
categories=('Me', 'Mn',)
)
table = self._make_table(values)
self._do_write(self.ZERO_OUT, 'ZERO_WIDTH', version, date, table) | def _do_zero_width(self) | Fetch and update zero width tables. | 9.998808 | 8.828263 | 1.132591 |
import collections
table = collections.deque()
start, end = values[0], values[0]
for num, value in enumerate(values):
if num == 0:
table.append((value, value,))
continue
start, end = table.pop()
if end == value - 1:
table.append((start, value,))
else:
table.append((start, end,))
table.append((value, value,))
return tuple(table) | def _make_table(values) | Return a tuple of lookup tables for given values. | 3.003666 | 2.73174 | 1.099543 |
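The range-collapsing behaviour of `_make_table` is easier to see with concrete input; this standalone sketch mirrors the logic above rather than importing the packaged function:

```python
# Minimal standalone check of the range-collapsing idea: consecutive
# code points collapse into (start, end) pairs.
import collections

def make_table(values):
    table = collections.deque()
    for num, value in enumerate(values):
        if num == 0:
            table.append((value, value))
            continue
        start, end = table.pop()
        if end == value - 1:
            table.append((start, value))      # extend the current run
        else:
            table.append((start, end))        # close the run,
            table.append((value, value))      # start a new one
    return tuple(table)

print(make_table([1, 2, 3, 7, 8, 10]))  # ((1, 3), (7, 8), (10, 10))
```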
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print("{}/ created.".format(folder))
if not os.path.exists(fname):
with open(fname, 'wb') as fout:
print("retrieving {}.".format(url))
resp = urlopen(url)
fout.write(resp.read())
print("{} saved.".format(fname))
else:
print("re-using artifact {}".format(fname))
return fname | def _do_retrieve(url, fname) | Retrieve given url to target filepath fname. | 2.62775 | 2.586979 | 1.01576 |
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
if any(details.startswith(property)
for property in properties):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | def _parse_east_asian(fname, properties=(u'W', u'F',)) | Parse unicode east-asian width tables. | 2.834951 | 2.806762 | 1.010043 |
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
addrs, details = addrs.rstrip(), details.lstrip()
if any(details.startswith('{} #'.format(value))
for value in categories):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | def _parse_category(fname, categories) | Parse unicode category tables. | 2.99987 | 2.811601 | 1.066961 |
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
date=date,
variable=variable,
variable_proper=variable.title()))
for start, end in table:
ucs_start, ucs_end = unichr(start), unichr(end)
hex_start, hex_end = ('0x{0:04x}'.format(start),
'0x{0:04x}'.format(end))
try:
name_start = string.capwords(unicodedata.name(ucs_start))
except ValueError:
name_start = u''
try:
name_end = string.capwords(unicodedata.name(ucs_end))
except ValueError:
name_end = u''
fout.write('\n' + (' ' * indent))
fout.write('({0}, {1},),'.format(hex_start, hex_end))
fout.write(' # {0:24s}..{1}'.format(
name_start[:24].rstrip() or '(nil)',
name_end[:24].rstrip()))
fout.write('\n)\n')
print("complete.") | def _do_write(fname, variable, version, date, table) | Write combining tables to filesystem as python code. | 3.270136 | 3.22011 | 1.015535 |
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local) | Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode | 10.285552 | 9.091536 | 1.131333 |
all_ucs = (ucs for ucs in
[unichr(val) for val in range(sys.maxunicode)]
if is_named(ucs) and isnt_combining(ucs))
libc_name = ctypes.util.find_library('c')
if not libc_name:
raise ImportError("Can't find C library.")
libc = ctypes.cdll.LoadLibrary(libc_name)
libc.wcwidth.argtypes = [ctypes.c_wchar, ]
libc.wcwidth.restype = ctypes.c_int
assert getattr(libc, 'wcwidth', None) is not None
assert getattr(libc, 'wcswidth', None) is not None
locale.setlocale(locale.LC_ALL, using_locale)
for ucs in all_ucs:
try:
_is_equal_wcwidth(libc, ucs)
except AssertionError as err:
print(err) | def main(using_locale=('en_US', 'UTF-8',)) | Program entry point.
Load the entire Unicode table into memory, excluding those that:
- are not named (func unicodedata.name returns empty string),
- are combining characters.
Using ``locale``, for each unicode character string compare libc's
wcwidth with local wcwidth.wcwidth() function; when they differ,
report a detailed AssertionError to stdout. | 3.173612 | 2.918585 | 1.087381 |
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
opts['--wide'] = int(opts['--wide'])
opts['character_factory'] = WcWideCharacterGenerator
if opts['--combining']:
opts['character_factory'] = WcCombinedCharacterGenerator
return opts | def validate_args(opts) | Validate and return options provided by docopt parsing. | 3.658015 | 3.618355 | 1.010961 |
term = Terminal()
style = Style()
# if the terminal supports colors, use a Style instance with some
# standout colors (magenta, cyan).
if term.number_of_colors:
style = Style(attr_major=term.magenta,
attr_minor=term.bright_cyan,
alignment=opts['--alignment'])
style.name_len = term.width - 15
screen = Screen(term, style, wide=opts['--wide'])
pager = Pager(term, screen, opts['character_factory'])
with term.location(), term.cbreak(), \
term.fullscreen(), term.hidden_cursor():
pager.run(writer=echo, reader=term.inkey)
return 0 | def main(opts) | Program entry point. | 8.042543 | 7.779984 | 1.033748 |
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
len(u' '),
UCS_PRINTLEN + 2,
len(u' '),
self.style.name_len,)) | def hint_width(self) | Width of a column segment. | 15.951753 | 14.903624 | 1.070327 |
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
self.term.rjust(*args) if self.style.alignment == 'right' else
self.term.ljust(*args))
txt = alignment(heading, self.hint_width, self.style.header_fill)
return self.style.attr_major(txt) | def head_item(self) | Text of a single column heading. | 6.104243 | 6.052117 | 1.008613 |
delim = self.style.attr_minor(self.style.delimiter)
txt = self.intro_msg_fmt.format(delim=delim).rstrip()
return self.term.center(txt) | def msg_intro(self) | Introductory message displayed above heading. | 13.813111 | 12.839695 | 1.075813 |
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | def num_columns(self) | Number of columns displayed. | 13.668602 | 8.479787 | 1.611904 |
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
self.term.width - 15)
assert self.term.width >= self.screen.hint_width, (
'Screen too small {}, must be at least {}'.format(
self.term.width, self.screen.hint_width))
self._set_lastpage()
self.dirty = self.STATE_REFRESH | def on_resize(self, *args) | Signal handler callback for SIGWINCH. | 6.986864 | 6.219445 | 1.12339 |
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | def _set_lastpage(self) | Calculate value of class attribute ``last_page``. | 6.631663 | 4.681802 | 1.416477 |
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout() | def display_initialize(self) | Display 'please wait' message, and narrow build warning. | 8.088936 | 7.904029 | 1.023394 |
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
break
if LIMIT_UCS == 0x10000:
echo(self.term.center('press any key.').rstrip())
flushout()
self.term.inkey(timeout=None)
return page_data | def initialize_page_data(self) | Initialize the page data for the given screen. | 9.03126 | 8.126624 | 1.111318 |
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
offset = max(0, offset)
while offset >= size:
offset -= size
idx += 1
if idx == self.last_page:
offset = 0
idx = min(max(0, idx), self.last_page)
start = (idx * self.screen.page_size) + offset
end = start + self.screen.page_size
return (idx, offset), self._page_data[start:end] | def page_data(self, idx, offset) | Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)] | 3.13719 | 3.20839 | 0.977808 |
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
# page displayed was last page, quit.
break
page_idx = npage_idx
self.dirty = self.STATE_DIRTY
return | def _run_notty(self, writer) | Pager run method for terminals that are not a tty. | 6.723699 | 6.232821 | 1.078757 |
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
page_idx,
page_offset)
if not self.dirty:
self.dirty = nxt != page_idx or noff != page_offset
page_idx, page_offset = nxt, noff
if page_idx == -1:
return | def _run_tty(self, writer, reader) | Pager run method for terminals that are a tty. | 4.446747 | 4.282129 | 1.038443 |
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader) | def run(self, writer, reader) | Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable | 6.304342 | 5.840881 | 1.079348 |
if inp.lower() in (u'q', u'Q'):
# exit
return (-1, -1)
self._process_keystroke_commands(inp)
idx, offset = self._process_keystroke_movement(inp, idx, offset)
return idx, offset | def process_keystroke(self, inp, idx, offset) | Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | 4.456269 | 4.09636 | 1.087861 |
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) | def _process_keystroke_commands(self, inp) | Process keystrokes that issue commands (side effects). | 2.503141 | 2.533008 | 0.988209 |
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
idx, offset = (max(0, idx + 10), offset)
elif inp.code in (term.KEY_SUP,):
# scroll backward 10 pages
idx, offset = (max(0, idx - 10), offset)
elif inp.code == term.KEY_HOME:
# top
idx, offset = (0, 0)
elif inp.code == term.KEY_END:
# bottom
idx, offset = (self.last_page, 0)
return idx, offset | def _process_keystroke_movement(self, inp, idx, offset) | Process keystrokes that adjust index and offset. | 2.145597 | 2.108073 | 1.0178 |
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must begin over again.
while self.dirty:
self.draw_heading(writer)
self.dirty = self.STATE_CLEAN
(idx, offset), data = self.page_data(idx, offset)
for txt in self.page_view(data):
writer(txt)
self.draw_status(writer, idx)
flushout()
return idx, offset | def draw(self, writer, idx, offset) | Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | 13.142763 | 12.497894 | 1.051598 |
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | def draw_heading(self, writer) | Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``. | 9.684248 | 6.352306 | 1.524525 |
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end = u'/{0}'.format(self.last_page)
txt = (u'Page {idx}{last_end} - '
u'{q} to quit, [keys: {keyset}]'
.format(idx=style.attr_minor(u'{0}'.format(idx)),
last_end=style.attr_major(last_end),
keyset=style.attr_major('kjfb12-='),
q=style.attr_minor(u'q')))
writer(self.term.center(txt).rstrip()) | def draw_status(self, writer, idx) | Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int | 5.526968 | 5.213851 | 1.060055 |
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val = self.text_entry(ucs, name)
col += 1
if col == self.screen.num_columns:
col = 0
if self.term.is_a_tty:
val = u''.join((val, clear_eol, u'\n'))
else:
val = u''.join((val.rstrip(), u'\n'))
yield val
if self.term.is_a_tty:
yield u''.join((clear_eol, u'\n', clear_eos)) | def page_view(self, data) | Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator | 6.285541 | 5.577068 | 1.127033 |
style = self.screen.style
if len(name) > style.name_len:
idx = max(0, style.name_len - len(style.continuation))
name = u''.join((name[:idx], style.continuation if idx else u''))
if style.alignment == 'right':
fmt = u' '.join(('0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}',
'{delimiter}{ucs}{delimiter}'
))
else:
fmt = u' '.join(('{delimiter}{ucs}{delimiter}',
'0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}'))
delimiter = style.attr_minor(style.delimiter)
if len(ucs) != 1:
# determine display of combining characters
val = ord(ucs[1])
# a combining character displayed of any fg color
# will reset the foreground character of the cell
# combined with (iTerm2, OSX).
disp_ucs = style.attr_major(ucs[0:2])
if len(ucs) > 2:
disp_ucs += ucs[2]
else:
# non-combining
val = ord(ucs)
disp_ucs = style.attr_major(ucs)
return fmt.format(name_len=style.name_len,
ucs_printlen=UCS_PRINTLEN,
delimiter=delimiter,
name=name,
ucs=disp_ucs,
val=val) | def text_entry(self, ucs, name) | Display a single column segment row describing ``(ucs, name)``.
:param ucs: target unicode point character string.
:param name: name of unicode point.
:rtype: unicode | 4.902145 | 4.779487 | 1.025663 |
if raise_err is None:
raise_err = False if ret_err else True
cmd_is_seq = isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq)
out, err = proc.communicate()
retcode = proc.returncode
cmd_str = ' '.join(cmd) if cmd_is_seq else cmd
if retcode is None:
proc.terminate()
raise RuntimeError(cmd_str + ' process did not terminate')
if raise_err and retcode != 0:
raise RuntimeError('{0} returned code {1} with error {2}'.format(
cmd_str, retcode, err.decode('latin-1')))
out = out.strip()
if as_str:
out = out.decode('latin-1')
if not ret_err:
return out
err = err.strip()
if as_str:
err = err.decode('latin-1')
return out, err | def back_tick(cmd, ret_err=False, as_str=True, raise_err=None) | Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True | 2.169631 | 2.305852 | 0.940924 |
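A hedged usage sketch for `back_tick` (assuming it is importable as `delocate.tools.back_tick`; the commands are arbitrary examples):

```python
# Assumes delocate is installed; the commands shown are arbitrary examples.
from delocate.tools import back_tick

listing = back_tick(['ls', '-l'])                 # stripped stdout, as str
out, err = back_tick(['ls', 'no-such-file'],      # capture stderr as well,
                     ret_err=True, raise_err=False)  # don't raise on non-zero exit
```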
uniques = []
for element in sequence:
if element not in uniques:
uniques.append(element)
return uniques | def unique_by_index(sequence) | unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence` | 2.267486 | 2.836766 | 0.799321 |
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
return f(filename, *args, **kwargs)
finally:
# restore original permissions
if not m & mode_flags:
os.chmod(filename, m)
return modify
return decorator | def ensure_permissions(mode_flags=stat.S_IWUSR) | decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification. | 2.979243 | 2.604473 | 1.143895 |
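A sketch of how the `ensure_permissions` decorator above is applied; the decorated function is hypothetical and the import path `delocate.tools.ensure_permissions` is an assumption:

```python
# Hypothetical decorated function; assumes delocate.tools.ensure_permissions.
import stat
from delocate.tools import ensure_permissions

@ensure_permissions(stat.S_IWUSR)
def append_line(filename, text):
    # filename is made user-writable for the duration of this call,
    # then its original permissions are restored
    with open(filename, 'a') as fobj:
        fobj.write(text + '\n')
```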
lines = _cmd_out_err(['otool', '-L', filename])
if not _line0_says_object(lines[0], filename):
return ()
names = tuple(parse_install_name(line)[0] for line in lines[1:])
install_id = get_install_id(filename)
if not install_id is None:
assert names[0] == install_id
return names[1:]
return names | def get_install_names(filename) | Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename` | 4.798578 | 5.231236 | 0.917293 |
lines = _cmd_out_err(['otool', '-D', filename])
if not _line0_says_object(lines[0], filename):
return None
if len(lines) == 1:
return None
if len(lines) != 2:
raise InstallNameError('Unexpected otool output ' + '\n'.join(lines))
return lines[1].strip() | def get_install_id(filename) | Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id | 5.304718 | 5.506626 | 0.963334 |
names = get_install_names(filename)
if oldname not in names:
raise InstallNameError('{0} not in install names for {1}'.format(
oldname, filename))
back_tick(['install_name_tool', '-change', oldname, newname, filename]) | def set_install_name(filename, oldname, newname) | Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname` | 3.447337 | 4.04857 | 0.851495 |
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | def set_install_id(filename, install_id) | Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id | 5.568525 | 6.894362 | 0.807692 |
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
line = lines[line_no]
line_no += 1
if line != 'cmd LC_RPATH':
continue
cmdsize, path = lines[line_no:line_no+2]
assert cmdsize.startswith('cmdsize ')
paths.append(RPATH_RE.match(path).groups()[0])
line_no += 2
return tuple(paths) | def get_rpaths(filename) | Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename` | 3.723567 | 3.912677 | 0.951667 |
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
info.filename = relpath(in_fname, in_dir).replace('\\', '/')
# Set time from modification time
info.date_time = time.localtime(in_stat.st_mtime)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
# Also set regular file permissions
perms = stat.S_IMODE(in_stat.st_mode) | stat.S_IFREG
info.external_attr = perms << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
z.close() | def dir2zip(in_dir, zip_fname) | Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write | 2.899804 | 2.936224 | 0.987596 |
package_sdirs = set()
for entry in os.listdir(root_path):
fname = entry if root_path == '.' else pjoin(root_path, entry)
if isdir(fname) and exists(pjoin(fname, '__init__.py')):
package_sdirs.add(fname)
return package_sdirs | def find_package_dirs(root_path) | Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path` | 2.8266 | 2.874171 | 0.983449 |
with open_readable(filename1, 'rb') as fobj:
contents1 = fobj.read()
with open_readable(filename2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2 | def cmp_contents(filename1, filename2) | Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise. | 2.326733 | 2.371452 | 0.981143 |
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | def get_archs(libname) | Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64' | 3.969168 | 3.872916 | 1.024853 |
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | def validate_signature(filename) | Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file | 9.292333 | 9.78694 | 0.949463 |
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list) | def os_path_relpath(path, start=os.path.curdir) | Return a relative version of a path | 1.326522 | 1.270488 | 1.044104 |
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
dirnames.remove(dirname)
for fname in filenames:
root, ext = splitext(fname)
from_path = pjoin(from_dirpath, fname)
to_path = pjoin(to_dirpath, fname)
if not exists(to_path):
_copyfile(from_path, to_path)
elif cmp_contents(from_path, to_path):
pass
elif ext in lib_exts:
# existing lib that needs fuse
lipo_fuse(from_path, to_path, to_path)
else:
# existing not-lib file not identical to source
_copyfile(from_path, to_path) | def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')) | Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts` - if present, check if there is a file with matching relative
path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
---------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries | 2.921226 | 2.918797 | 1.000832 |
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | def fuse_wheels(to_wheel, from_wheel, out_wheel) | Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel` | 3.525126 | 3.996774 | 0.881993 |
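A hedged example call for `fuse_wheels`, with made-up wheel filenames and an assumed import path of `delocate.fuse.fuse_wheels`:

```python
# Hypothetical wheel filenames; assumes delocate is installed and the
# function above is importable as delocate.fuse.fuse_wheels.
from delocate.fuse import fuse_wheels

fuse_wheels('pkg-1.0-cp39-macosx_10_9_x86_64.whl',
            'pkg-1.0-cp39-macosx_11_0_arm64.whl',
            'pkg-1.0-cp39-macosx_10_9_universal2.whl')
```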
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
if not exists(required):
raise DelocationError('library "{0}" does not exist'.format(
required))
copied_libs[required] = requirings
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
delocated_libs.add(required)
# Modify in place now that we've checked for errors
for required in copied_libs:
shutil.copy(required, lib_path)
# Set rpath and install names for this copied library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(rp_lib_path, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/{0}/{1}'.format(
req_rel, basename(required)))
for required in delocated_libs:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/' + req_rel)
return copied_libs | def delocate_tree_libs(lib_dict, lib_path, root_path) | Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str, optional
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to `lib_path``. | 4.365513 | 4.30743 | 1.013484 |
if copied_libs is None:
copied_libs = {}
else:
copied_libs = dict(copied_libs)
done = False
while not done:
in_len = len(copied_libs)
_copy_required(lib_path, copy_filt_func, copied_libs)
done = len(copied_libs) == in_len
return copied_libs | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None) | Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added. | 2.54252 | 2.622015 | 0.969681 |
# Paths will be prepended with `lib_path`
lib_dict = tree_libs(lib_path)
# Map library paths after copy ('copied') to path before copy ('orig')
rp_lp = realpath(lib_path)
copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
for required, requirings in lib_dict.items():
if not copy_filt_func is None and not copy_filt_func(required):
continue
if required.startswith('@'):
# May have been processed by us, or have some rpath, loader_path of
# its own. Either way, leave alone
continue
# Requiring names may well be the copies in lib_path. Replace the copy
# names with the original names for entry into `copied_libs`
procd_requirings = {}
# Set requiring lib install names to point to local copy
for requiring, orig_install_name in requirings.items():
set_install_name(requiring,
orig_install_name,
'@loader_path/' + basename(required))
# Make processed version of ``dependings_dict``
mapped_requiring = copied2orig.get(requiring, requiring)
procd_requirings[mapped_requiring] = orig_install_name
if required in copied_libs:
# Have copied this already, add any new requirings
copied_libs[required].update(procd_requirings)
continue
# Haven't see this one before, add entry to copied_libs
out_path = pjoin(lib_path, basename(required))
if exists(out_path):
raise DelocationError(out_path + ' already exists')
shutil.copy(required, lib_path)
copied2orig[out_path] = required
copied_libs[required] = procd_requirings | def _copy_required(lib_path, copy_filt_func, copied_libs) | Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib``, ``/sys/libB.dylib``
telling us we previously copied those guys from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']`` | 6.334229 | 5.91127 | 1.071551 |
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
if not exists(lib_path):
os.makedirs(lib_path)
lib_dict = tree_libs(tree_path, lib_filt_func)
if not copy_filt_func is None:
lib_dict = dict((key, value) for key, value in lib_dict.items()
if copy_filt_func(key))
copied = delocate_tree_libs(lib_dict, lib_path, tree_path)
return copy_recurse(lib_path, copy_filt_func, copied) | def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs) | Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. | 2.950115 | 2.866607 | 1.029131 |
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | def _merge_lib_dict(d1, d2) | Merges lib_dict `d2` into lib_dict `d1` | 3.808742 | 3.401402 | 1.119756 |
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, 'rb') as fobj:
patch_proc = Popen(['patch', '-p1'],
stdin = fobj,
stdout = PIPE,
stderr = PIPE)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError("Patch failed with stdout:\n" +
stdout.decode('latin1')) | def patch_wheel(in_wheel, patch_fname, out_wheel=None) | Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel` | 2.083468 | 2.24162 | 0.929447 |
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | def check_archs(copied_libs, require_archs=(), stop_fast=False) | Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required. | 2.409355 | 1.944581 | 1.23901 |
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | def bads_report(bads, path_prefix=None) | Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing | 2.796139 | 2.235158 | 1.25098 |
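The two rows above are typically chained: `check_archs` collects problems and `bads_report` formats them. A hedged sketch with a hand-made `copied_libs` dict; the paths are placeholders, and `get_archs` needs real Mach-O files before this produces meaningful output:

    copied_libs = {
        '/tmp/wheel/package/.dylibs/libfoo.dylib': {
            '/tmp/wheel/package/_ext.so': '@loader_path/.dylibs/libfoo.dylib'}}
    bads = check_archs(copied_libs, require_archs='intel')
    if bads:
        print(bads_report(bads, path_prefix='/tmp/wheel/'))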
lib_dict = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_libpath = realpath(pjoin(dirpath, base))
            if filt_func is not None and not filt_func(depending_libpath):
continue
rpaths = get_rpaths(depending_libpath)
for install_name in get_install_names(depending_libpath):
lib_path = (install_name if install_name.startswith('@')
else realpath(install_name))
lib_path = resolve_rpath(lib_path, rpaths)
if lib_path in lib_dict:
lib_dict[lib_path][depending_libpath] = install_name
else:
lib_dict[lib_path] = {depending_libpath: install_name}
return lib_dict | def tree_libs(start_path, filt_func=None) | Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html | 2.540508 | 2.228515 | 1.14 |
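A hedged example of calling the function above on an unpacked package directory; the filter shown is an illustration, not part of the API, and the path is a placeholder:

    def shared_libs_only(path):
        # inspect only files that look like shared libraries
        return path.endswith(('.so', '.dylib'))

    lib_dict = tree_libs('/path/to/unpacked/package', filt_func=shared_libs_only)
    for depended, dependers in lib_dict.items():
        print(depended, '<-', sorted(dependers))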
if not lib_path.startswith('@rpath/'):
return lib_path
lib_rpath = lib_path.split('/', 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
'\n\t'.join(realpath(path) for path in rpaths),
)
)
return lib_path | def resolve_rpath(lib_path, rpaths) | Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved libraries realpath. | 2.451395 | 2.355479 | 1.04072 |
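A small sketch of the resolution above (paths are placeholders; if no candidate exists the warning is printed and the name comes back unchanged):

    lib = resolve_rpath('@rpath/libbar.dylib', ['/opt/local/lib', '/usr/local/lib'])
    print(lib)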
n = len(strip_prefix)
def stripper(path):
return path if not path.startswith(strip_prefix) else path[n:]
return stripper | def get_prefix_stripper(strip_prefix) | Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified | 2.986485 | 4.894447 | 0.610178 |
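This helper is self-contained, so a runnable illustration is straightforward:

    strip = get_prefix_stripper('/tmp/wheel/')
    print(strip('/tmp/wheel/pkg/_ext.so'))   # -> 'pkg/_ext.so'
    print(strip('/usr/lib/libz.dylib'))      # unchanged: prefix not present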
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | def stripped_lib_dict(lib_dict, strip_prefix) | Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths. | 3.680371 | 3.963366 | 0.928597 |
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs(tmpdir, filt_func)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep) | def wheel_libs(wheel_fname, filt_func = None) | Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree. | 6.124689 | 7.709787 | 0.794404 |
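A hedged dump of a wheel's dependency tree using the row above; the wheel filename is a placeholder:

    for lib, dependers in wheel_libs('pkg-1.0-cp39-cp39-macosx_10_9_x86_64.whl').items():
        for depender, install_name in dependers.items():
            print('{0} -> {1} (as {2})'.format(depender, lib, install_name))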
if sys.version_info[0] < 3:
return open_rw(name, mode + 'b')
return open_rw(name, mode, newline='', encoding='utf-8') | def _open_for_csv(name, mode) | Deal with Python 2/3 open API differences | 3.33038 | 2.853034 | 1.167312 |
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
        raise WheelToolsError("Should be exactly one `*.dist-info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | def rewrite_record(bdist_dir) | Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file | 3.162329 | 3.031854 | 1.043035 |
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
if info['Root-Is-Purelib'] == 'true':
raise WheelToolsError('Cannot add platforms to pure wheel')
in_info_tags = [tag for name, tag in info.items() if name == 'Tag']
# Python version, C-API version combinations
pyc_apis = ['-'.join(tag.split('-')[:2]) for tag in in_info_tags]
# unique Python version, C-API version combinations
pyc_apis = unique_by_index(pyc_apis)
# Add new platform tags for each Python version, C-API combination
required_tags = ['-'.join(tup) for tup in product(pyc_apis, platforms)]
needs_write = False
for req_tag in required_tags:
if req_tag in in_info_tags: continue
needs_write = True
info.add_header('Tag', req_tag)
if needs_write:
write_pkg_info(info_fname, info)
# Tell context manager to write wheel on exit by setting filename
ctx.out_wheel = out_wheel
return ctx.out_wheel | def add_platforms(in_wheel, platforms, out_path=None, clobber=False) | Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
    ``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written. | 3.399784 | 3.41975 | 0.994162 |
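A usage sketch for the row above, assuming `add_platforms` as defined here is in scope; the wheel name is a placeholder:

    out_wheel = add_platforms('dist/pkg-1.0-cp39-cp39-macosx_10_9_intel.whl',
                              ['macosx_10_9_x86_64'],
                              out_path='dist', clobber=True)
    print(out_wheel)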
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
    tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
    bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
bet = np.zeros([paths[['from', 'to']].max().max() +
1, paths['t_start'].max()+1])
for row in paths.iterrows():
if (np.isnan(row[1]['path includes'])).all():
pass
else:
nodes_in_path = np.unique(np.concatenate(
row[1]['path includes'])).astype(int).tolist()
nodes_in_path.remove(row[1]['from'])
nodes_in_path.remove(row[1]['to'])
if len(nodes_in_path) > 0:
bet[nodes_in_path, row[1]['t_start']] += 1
# Normalise bet
bet = (1/((bet.shape[0]-1)*(bet.shape[0]-2))) * bet
if calc == 'global':
bet = np.mean(bet, axis=1)
return bet | def temporal_betweenness_centrality(tnet=None, paths=None, calc='time') | Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node) | 5.136067 | 2.174543 | 2.361907 |
N = community.shape[0]
C = community.shape[1]
    T = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | def allegiance(community) | Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1 | 4.341524 | 3.232135 | 1.343237 |
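For orientation, the same allegiance computation can be written without the explicit loops; a self-contained numpy sketch on a toy node-by-time community array:

    import numpy as np

    community = np.array([[0, 0, 1],
                          [0, 1, 1],
                          [1, 1, 0]])                        # 3 nodes, 3 time points
    same = community[:, None, :] == community[None, :, :]    # node x node x time
    P = same.mean(axis=-1)                                    # fraction of time in same community
    np.fill_diagonal(P, 0)                                    # the loop above skips i == j
    print(P)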
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
    if isinstance(lam, list) != isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet') | Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed. | 2.343398 | 2.287699 | 1.024347 |
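A hedged usage sketch; the import path follows teneto's documented layout but is an assumption here:

    from teneto.generatenetwork import rand_poisson

    # 10-node undirected binary network, 5 contacts per edge, mean inter-contact interval 3
    net = rand_poisson(10, ncontacts=5, lam=3, nettype='bu', netrep='graphlet')
    print(net.shape)   # (nodes, nodes, time)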
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
# Calculate efficiency which is 1 over the mean path.
if calc == 'global':
eff = 1 / np.nanmean(pathmat)
elif calc == 'node' or calc == 'node_from':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=1)
elif calc == 'node_to':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=0)
    return eff | def temporal_efficiency(tnet=None, paths=None, calc='global') | Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
Returns
-------
E : array
Global temporal efficiency | 3.28549 | 3.10582 | 1.057849 |
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
else:
i, j, t = np.where(array != 0)
w = array[array != 0]
self.network = pd.DataFrame(
data={'i': i, 'j': j, 't': t, 'weight': w})
self.N = int(array.shape[0])
self.T = int(array.shape[-1])
        self._update_network() | def network_from_array(self, array) | Defines a network from an array.
Parameters
----------
array : array
3D numpy array. | 2.907504 | 2.946008 | 0.98693 |
teneto.utils.check_TemporalNetwork_input(df, 'df')
self.network = df
    self._update_network() | def network_from_df(self, df) | Defines a network from a pandas DataFrame.
Parameters
----------
df : pandas.DataFrame
    Should have columns: 'i', 'j', 't', where i and j are node indices and t is the temporal index.
    If weighted, should also include 'weight'. Each row is an edge. | 19.128666 | 22.856434 | 0.836905 |
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
self.network = pd.DataFrame(edgelist, columns=colnames)
    self._update_network() | def network_from_edgelist(self, edgelist) | Defines a network from an edgelist.
Parameters
----------
edgelist : list of lists.
    A list of lists which are 3 or 4 in length. For binary networks each sublist should be [i, j, t] where i and j are node indices and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight]. | 3.601045 | 3.452473 | 1.043034 |
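The constructors above belong to the same network object; a hedged sketch assuming that object is teneto's TemporalNetwork class:

    from teneto import TemporalNetwork

    tnet = TemporalNetwork(nettype='bu')
    tnet.network_from_edgelist([[0, 1, 0], [0, 2, 1], [1, 2, 2]])   # binary (i, j, t) edges
    print(tnet.network)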
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
self.network.drop_duplicates(['ij', 't'], inplace=True)
self.network.reset_index(inplace=True, drop=True)
self.network.drop('ij', inplace=True, axis=1) | def _drop_duplicate_ij(self) | Drops duplicate entries from the network dataframe. | 3.032341 | 2.372205 | 1.278279 |
self.network = self.network.where(
self.network['i'] != self.network['j']).dropna()
self.network.reset_index(inplace=True, drop=True) | def _drop_diagonal(self) | Drops self-contacts from the network dataframe. | 4.037416 | 2.706006 | 1.49202 |
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
rows = hdf.get_storer('network').nrows
hdf.append('network', pd.DataFrame(edgelist, columns=colnames, index=np.arange(
rows, rows+len(edgelist))), format='table', data_columns=True)
edgelist = np.array(edgelist)
if np.max(edgelist[:, :2]) > self.netshape[0]:
self.netshape[0] = np.max(edgelist[:, :2])
if np.max(edgelist[:, 2]) > self.netshape[1]:
self.netshape[1] = np.max(edgelist[:, 2])
else:
newedges = pd.DataFrame(edgelist, columns=colnames)
self.network = pd.concat(
[self.network, newedges], ignore_index=True, sort=True)
    self._update_network() | def add_edge(self, edgelist) | Adds an edge to the network.
Parameters
----------
edgelist : list
    a list (or list of lists) containing the i, j and t indices to be added. For weighted networks the list should also contain a weight value.
Returns
--------
    Updates the network dataframe on the object with the new edge | 2.439584 | 2.226959 | 1.095478 |
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
for e in edgelist:
hdf.remove(
'network', 'i == ' + str(e[0]) + ' & ' + 'j == ' + str(e[1]) + ' & ' + 't == ' + str(e[2]))
print('HDF5 delete warning. This will not reduce the size of the file.')
else:
for e in edgelist:
idx = self.network[(self.network['i'] == e[0]) & (
self.network['j'] == e[1]) & (self.network['t'] == e[2])].index
self.network.drop(idx, inplace=True)
self.network.reset_index(inplace=True, drop=True)
    self._update_network() | def drop_edge(self, edgelist) | Removes an edge from the network.
Parameters
----------
edgelist : list
    a list (or list of lists) containing the i, j and t indices to be removed.
Returns
--------
    Updates the network dataframe on the object | 3.399093 | 3.002503 | 1.132086 |
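Continuing the TemporalNetwork sketch above, adding and then removing a single edge with the two methods in these rows (still assuming the class is teneto's TemporalNetwork):

    tnet.add_edge([1, 3, 2])    # add binary edge i=1, j=3 at t=2
    tnet.drop_edge([1, 3, 2])   # and remove it again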
availablemeasures = [f for f in dir(
teneto.networkmeasures) if not f.startswith('__')]
if networkmeasure not in availablemeasures:
raise ValueError(
'Unknown network measure. Available network measures are: ' + ', '.join(availablemeasures))
funs = inspect.getmembers(teneto.networkmeasures)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
measure = funs[networkmeasure](self, **measureparams)
return measure | def calc_networkmeasure(self, networkmeasure, **measureparams) | Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasure.[networkmeasure] | 2.793876 | 2.548067 | 1.096469 |
availabletypes = [f for f in dir(
teneto.generatenetwork) if not f.startswith('__')]
if networktype not in availabletypes:
raise ValueError(
            'Unknown network type. Available networks to generate are: ' + ', '.join(availabletypes))
funs = inspect.getmembers(teneto.generatenetwork)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
network = funs[networktype](**networkparams)
self.network_from_array(network)
if self.nettype[1] == 'u':
self._drop_duplicate_ij() | def generatenetwork(self, networktype, **networkparams) | Generate a network
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
measureparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
    The object's network attribute is set to the generated network. | 4.846408 | 4.341258 | 1.11636 |
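A combined sketch for the two rows above, still on the TemporalNetwork object from the earlier sketch: generate a random network by name, then compute a measure by name. Both names are looked up in teneto's submodules, so they must exist there; this is an assumption of the example:

    tnet = TemporalNetwork(nettype='bu')
    tnet.generatenetwork('rand_poisson', nnodes=8, ncontacts=4, lam=2)
    degree = tnet.calc_networkmeasure('temporal_degree_centrality')
    print(degree)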
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) | def save_aspickle(self, fname) | Saves object as pickle.
fname : str
file path. | 1.898882 | 2.123991 | 0.894016 |
if not report:
report = {}
# Due to rounding errors
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report['fisher'] = {}
report['fisher']['performed'] = 'yes'
#report['fisher']['diagonal'] = 'zeroed'
return fisher_data, report | def postpro_fisher(data, report=None) | Performs fisher transform on everything in data.
If report variable is passed, this is added to the report. | 3.069094 | 2.92538 | 1.049127 |
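A quick sanity check of the transform above on a toy connectivity array, assuming the helper is in scope with numpy imported as np (as the surrounding code does):

    import numpy as np

    data = np.zeros((2, 2, 3))
    data[0, 1, :] = data[1, 0, :] = [0.2, 0.5, 0.9]
    fisher_data, report = postpro_fisher(data)
    print(fisher_data[0, 1, :])   # equals np.arctanh([0.2, 0.5, 0.9])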
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
    report['boxcox']['shifted_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
            'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time-series might need additional transforms to approximate to Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | def postpro_boxcox(data, report=None) | Performs box cox transform on everything in data.
If report variable is passed, this is added to the report. | 3.859484 | 3.796534 | 1.016581 |
if not report:
report = {}
# First make dim 1 = time.
data = np.transpose(data, [2, 0, 1])
standardized_data = (data - data.mean(axis=0)) / data.std(axis=0)
standardized_data = np.transpose(standardized_data, [1, 2, 0])
report['standardize'] = {}
report['standardize']['performed'] = 'yes'
report['standardize']['method'] = 'Z-score'
    # The standardization above makes self connections nan; set them back to 1.
    standardized_data = set_diagonal(standardized_data, 1)
return standardized_data, report | def postpro_standardize(data, report=None) | Standardizes everything in data (along axis -1).
If report variable is passed, this is added to the report. | 3.97132 | 3.979377 | 0.997975 |
weights = np.ones([T, T])
np.fill_diagonal(weights, 0)
report['method'] = 'jackknife'
report['jackknife'] = ''
return weights, report | def _weightfun_jackknife(T, report) | Creates the weights for the jackknife method. See func: teneto.derive.derive. | 4.540163 | 4.203338 | 1.080133 |
weightat0 = np.zeros(T)
weightat0[0:params['windowsize']] = np.ones(params['windowsize'])
weights = np.array([np.roll(weightat0, i)
for i in range(0, T + 1 - params['windowsize'])])
report['method'] = 'slidingwindow'
report['slidingwindow'] = params
report['slidingwindow']['taper'] = 'untapered/uniform'
return weights, report | def _weightfun_sliding_window(T, params, report) | Creates the weights for the sliding window method. See func: teneto.derive.derive. | 4.12341 | 4.035889 | 1.021686 |
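The weight matrix produced above is just a stack of shifted boxcar windows; a standalone numpy illustration of its shape:

    import numpy as np

    T, windowsize = 6, 3
    weightat0 = np.zeros(T)
    weightat0[:windowsize] = 1
    weights = np.array([np.roll(weightat0, i) for i in range(T + 1 - windowsize)])
    print(weights)   # (T - windowsize + 1) rows, each a length-T boxcar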
x = np.arange(-(params['windowsize'] - 1) / 2, (params['windowsize']) / 2)
distribution_parameters = ','.join(map(str, params['distribution_params']))
taper = eval('sps.' + params['distribution'] +
'.pdf(x,' + distribution_parameters + ')')
weightat0 = np.zeros(T)
weightat0[0:params['windowsize']] = taper
weights = np.array([np.roll(weightat0, i)
for i in range(0, T + 1 - params['windowsize'])])
report['method'] = 'slidingwindow'
report['slidingwindow'] = params
report['slidingwindow']['taper'] = taper
report['slidingwindow']['taper_window'] = x
return weights, report | def _weightfun_tapered_sliding_window(T, params, report) | Creates the weights for the tapered method. See func: teneto.derive.derive. | 3.73615 | 3.713936 | 1.005981 |
distance = getDistanceFunction(params['distance'])
weights = np.array([distance(data[n, :], data[t, :]) for n in np.arange(
0, data.shape[0]) for t in np.arange(0, data.shape[0])])
weights = np.reshape(weights, [data.shape[0], data.shape[0]])
np.fill_diagonal(weights, np.nan)
weights = 1 / weights
weights = (weights - np.nanmin(weights)) / \
(np.nanmax(weights) - np.nanmin(weights))
np.fill_diagonal(weights, 1)
return weights, report | def _weightfun_spatial_distance(data, params, report) | Creates the weights for the spatial distance method. See func: teneto.derive.derive. | 2.366621 | 2.372177 | 0.997658 |
# Data should be timexnode
report = {}
# Derivative
tdat = data[1:, :] - data[:-1, :]
# Normalize
tdat = tdat / np.std(tdat, axis=0)
# Coupling
coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(0,
tdat.shape[1]) for j in np.arange(0, tdat.shape[1])])
coupling = np.reshape(
coupling, [tdat.shape[1], tdat.shape[1], tdat.shape[0]])
# Average over window using strides
shape = coupling.shape[:-1] + (coupling.shape[-1] -
params['windowsize'] + 1, params['windowsize'])
strides = coupling.strides + (coupling.strides[-1],)
coupling_windowed = np.mean(np.lib.stride_tricks.as_strided(
coupling, shape=shape, strides=strides), -1)
report = {}
report['method'] = 'temporalderivative'
report['temporalderivative'] = {}
report['temporalderivative']['windowsize'] = params['windowsize']
return coupling_windowed, report | def _temporal_derivative(data, params, report) | Performs mtd method. See func: teneto.derive.derive. | 3.043904 | 3.043396 | 1.000167 |
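For orientation, a self-contained numpy sketch of the same multiplication-of-temporal-derivatives idea on toy data (time is kept as the leading axis here, unlike the node-by-node-by-time output above):

    import numpy as np

    np.random.seed(0)
    data = np.random.randn(50, 3)                        # time x node
    tdat = np.diff(data, axis=0)
    tdat = tdat / tdat.std(axis=0)
    coupling = tdat[:, :, None] * tdat[:, None, :]       # time x node x node
    windowsize = 7
    kernel = np.ones(windowsize) / windowsize
    mtd = np.apply_along_axis(
        lambda x: np.convolve(x, kernel, mode='valid'), 0, coupling)
    print(mtd.shape)                                     # (time - windowsize, nodes, nodes)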