Dataset columns:

| column | type | range |
|---|---|---|
| body | string | 26 to 98.2k characters |
| body_hash | int64 | -9,222,864,604,528,158,000 to 9,221,803,474B |
| docstring | string | 1 to 16.8k characters |
| path | string | 5 to 230 characters |
| name | string | 1 to 96 characters |
| repository_name | string | 7 to 89 characters |
| lang | string | 1 class ("python") |
| body_without_docstring | string | 20 to 98.2k characters |
def __init__(self, name, width, height):
'Create a representation of a CIF resolution.\n\n :param name: CIF standard name of the resolution.\n :type name: basestring\n :param width: Width of the resolution in pixels.\n :type width: int\n :param height: Height of the resolution in pixels.\n :type height: int\n '
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
| body_hash: -4,374,105,267,372,580,400
| docstring:
Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
| path: nodes/axis.py | name: __init__ | repository_name: MarcoStb1993/axis_camera | lang: python |
| body_without_docstring:
def __init__(self, name, width, height):
'Create a representation of a CIF resolution.\n\n :param name: CIF standard name of the resolution.\n :type name: basestring\n :param width: Width of the resolution in pixels.\n :type width: int\n :param height: Height of the resolution in pixels.\n :type height: int\n '
super(CIFVideoResolution, self).__init__(width, height)
self.name = name |
def set_application(self, app, callback=None):
'\n Set ``CommandLineInterface`` instance for this connection.\n (This can be replaced any time.)\n\n :param cli: CommandLineInterface instance.\n :param callback: Callable that takes the result of the CLI.\n '
assert isinstance(app, Application)
assert ((callback is None) or callable(callback))
self.cli = CommandLineInterface(application=app, eventloop=self.eventloop, output=self.vt100_output)
self.callback = callback
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()]
self.cli._is_running = True
def data_received(data):
" TelnetProtocolParser 'data_received' callback "
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
" TelnetProtocolParser 'size_received' callback "
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
| body_hash: -4,972,495,098,842,824,000
| docstring:
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param cli: CommandLineInterface instance.
:param callback: Callable that takes the result of the CLI.
| path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: set_application | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def set_application(self, app, callback=None):
'\n Set ``CommandLineInterface`` instance for this connection.\n (This can be replaced any time.)\n\n :param cli: CommandLineInterface instance.\n :param callback: Callable that takes the result of the CLI.\n '
assert isinstance(app, Application)
assert ((callback is None) or callable(callback))
self.cli = CommandLineInterface(application=app, eventloop=self.eventloop, output=self.vt100_output)
self.callback = callback
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()]
self.cli._is_running = True
def data_received(data):
" TelnetProtocolParser 'data_received' callback "
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return
def size_received(rows, columns):
" TelnetProtocolParser 'size_received' callback "
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received) |
def feed(self, data):
'\n Handler for incoming data. (Called by TelnetServer.)\n '
assert isinstance(data, binary_type)
self.parser.feed(data)
self.cli._redraw()
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
self._handle_command(return_value)
| body_hash: 8,441,465,389,694,218,000 | docstring: Handler for incoming data. (Called by TelnetServer.) | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: feed | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def feed(self, data):
'\n \n '
assert isinstance(data, binary_type)
self.parser.feed(data)
self.cli._redraw()
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
self._handle_command(return_value) |
def _handle_command(self, command):
'\n Handle command. This will run in a separate thread, in order not\n to block the event loop.\n '
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if (self.callback is not None):
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
if (not self.closed):
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
| body_hash: 5,887,364,486,101,096,000
| docstring:
Handle command. This will run in a separate thread, in order not
to block the event loop.
| path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: _handle_command | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def _handle_command(self, command):
'\n Handle command. This will run in a separate thread, in order not\n to block the event loop.\n '
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if (self.callback is not None):
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
if (not self.closed):
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor) |
def erase_screen(self):
'\n Erase output screen.\n '
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
| body_hash: 862,328,611,296,033,400 | docstring: Erase output screen. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: erase_screen | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def erase_screen(self):
'\n \n '
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush() |
def send(self, data):
'\n Send text to the client.\n '
assert isinstance(data, text_type)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
| body_hash: 8,521,474,259,566,262,000 | docstring: Send text to the client. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: send | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def send(self, data):
'\n \n '
assert isinstance(data, text_type)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush() |
def close(self):
'\n Close the connection.\n '
self.application.client_leaving(self)
self.conn.close()
self.closed = True
| body_hash: 1,440,076,575,365,623,600 | docstring: Close the connection. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: close | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def close(self):
'\n \n '
self.application.client_leaving(self)
self.conn.close()
self.closed = True |
def close(self):
' Ignore. '
| body_hash: 7,883,102,787,620,217,000 | docstring: Ignore. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: close | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def close(self):
' ' |
def stop(self):
' Ignore. '
| body_hash: 2,207,530,068,346,443,300 | docstring: Ignore. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: stop | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def stop(self):
' ' |
def _process_callbacks(self):
'\n Process callbacks from `call_from_executor` in eventloop.\n '
os.read(self._schedule_pipe[0], 1024)
(calls_from_executor, self._calls_from_executor) = (self._calls_from_executor, [])
for c in calls_from_executor:
c()
| body_hash: -1,612,742,384,304,309,500 | docstring: Process callbacks from `call_from_executor` in eventloop. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: _process_callbacks | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def _process_callbacks(self):
'\n \n '
os.read(self._schedule_pipe[0], 1024)
(calls_from_executor, self._calls_from_executor) = (self._calls_from_executor, [])
for c in calls_from_executor:
c() |
def run(self):
'\n Run the eventloop for the telnet server.\n '
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
self.connections = set([c for c in self.connections if (not c.closed)])
connections = set([c for c in self.connections if (not c.handling_command)])
read_list = ([listen_socket, self._schedule_pipe[0]] + [c.conn for c in connections])
(read, _, _) = select.select(read_list, [], [])
for s in read:
if (s == listen_socket):
self._accept(listen_socket)
elif (s == self._schedule_pipe[0]):
self._process_callbacks()
else:
self._handle_incoming_data(s)
finally:
listen_socket.close()
| body_hash: 2,312,991,810,713,612,300 | docstring: Run the eventloop for the telnet server. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: run | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def run(self):
'\n \n '
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
self.connections = set([c for c in self.connections if (not c.closed)])
connections = set([c for c in self.connections if (not c.handling_command)])
read_list = ([listen_socket, self._schedule_pipe[0]] + [c.conn for c in connections])
(read, _, _) = select.select(read_list, [], [])
for s in read:
if (s == listen_socket):
self._accept(listen_socket)
elif (s == self._schedule_pipe[0]):
self._process_callbacks()
else:
self._handle_incoming_data(s)
finally:
listen_socket.close() |
def _accept(self, listen_socket):
'\n Accept new incoming connection.\n '
(conn, addr) = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr)
| body_hash: 6,325,692,078,672,708,000 | docstring: Accept new incoming connection. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: _accept | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def _accept(self, listen_socket):
'\n \n '
(conn, addr) = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr) |
def _handle_incoming_data(self, conn):
'\n Handle incoming data on socket.\n '
connection = [c for c in self.connections if (c.conn == conn)][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection)
| body_hash: -6,780,554,973,943,267,000 | docstring: Handle incoming data on socket. | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: _handle_incoming_data | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def _handle_incoming_data(self, conn):
'\n \n '
connection = [c for c in self.connections if (c.conn == conn)][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection) |
def data_received(data):
" TelnetProtocolParser 'data_received' callback "
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
| body_hash: -8,787,866,247,884,166,000 | docstring: TelnetProtocolParser 'data_received' callback | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: data_received | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def data_received(data):
" "
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return |
def size_received(rows, columns):
" TelnetProtocolParser 'size_received' callback "
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
| body_hash: 1,583,678,943,765,159,400 | docstring: TelnetProtocolParser 'size_received' callback | path: oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | name: size_received | repository_name: sainjusajan/django-oscar | lang: python |
| body_without_docstring:
def size_received(rows, columns):
" "
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed() |
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
'Converts a summary of all ranks from kraken into rank-wise profiles\n similar to the CAMI-SIM output\n\n Parameters\n ----------\n all_rank_summary\n output_rank_summaries\n ranks\n\n Returns\n -------\n\n '
all_ranks = pd.read_csv(all_rank_summary, sep='\t')
all_ranks.columns = kraken_columns
all_ranks['rank'] = all_ranks['rank'].str[0]
all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
all_ranks['RANK'] = [kraken_rank_dictionary[key] for key in all_ranks['rank']]
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for (output_, rank) in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[(all_ranks['RANK'] == rank)]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
| body_hash: 31,499,075,654,049,030
| docstring:
Converts a summary of all ranks from kraken into rank-wise profiles
similar to the CAMI-SIM output
Parameters
----------
all_rank_summary
output_rank_summaries
ranks
Returns
-------
| path: benchutils/transformers.py | name: kraken2_transformer | repository_name: qiyunzhu/taxa-assign-benchmarking | lang: python |
| body_without_docstring:
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
'Converts a summary of all ranks from kraken into rank-wise profiles\n similar to the CAMI-SIM output\n\n Parameters\n ----------\n all_rank_summary\n output_rank_summaries\n ranks\n\n Returns\n -------\n\n '
all_ranks = pd.read_csv(all_rank_summary, sep='\t')
all_ranks.columns = kraken_columns
all_ranks['rank'] = all_ranks['rank'].str[0]
all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
all_ranks['RANK'] = [kraken_rank_dictionary[key] for key in all_ranks['rank']]
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for (output_, rank) in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[(all_ranks['RANK'] == rank)]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False) |
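The row above documents `kraken2_transformer`, which collapses a full Kraken2 report into per-rank, CAMI-style profiles. A minimal usage sketch follows; the file names are illustrative, and it assumes the module-level `kraken_rank_dictionary` maps Kraken's one-letter rank codes to names such as 'species' and 'genus'.

```python
# Hypothetical call (file names are not from the dataset): split one Kraken2
# report into per-rank profile tables, one output file per requested rank.
kraken2_transformer(
    all_rank_summary='sample1.kraken2_report.tsv',
    output_rank_summaries=['sample1.species.profile.tsv',
                           'sample1.genus.profile.tsv'],
    ranks=['species', 'genus'],
)
```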
def define_argparse():
' todo, find a way to use this effectively '
import argparse
parser = argparse.ArgumentParser(description='IBEIS super setup')
def add_flag(group, name, help=None):
group.add_argument(name.replace('--', ''), action='store_true', default=False, help=help)
g1 = parser.add_argument_group('setup')
add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
add_flag(g1, 'ensure', help='ensures that all repos are checked out')
add_flag(g1, 'build', help='builds python packages')
add_flag(g1, 'develop', help='installs packages in developer mode')
add_flag(g1, 'dcnn', help='setup dcnn packages')
g4 = parser.add_argument_group('maintenance')
add_flag(g4, 'pull', help='pulls all IBIES repos')
g3 = parser.add_argument_group('extern')
add_flag(g3, 'no_qt')
add_flag(g3, 'no_gui')
add_flag(g3, 'ignore_opencv')
g2 = parser.add_argument_group('utils')
add_flag(g2, 'move_wildme', help='changes to the wildme repos')
args = parser.parse_args()
return args
| body_hash: -2,534,346,957,582,279,000 | docstring: todo, find a way to use this effectively | path: super_setup.py | name: define_argparse | repository_name: brmscheiner/ibeis | lang: python |
| body_without_docstring:
def define_argparse():
' '
import argparse
parser = argparse.ArgumentParser(description='IBEIS super setup')
def add_flag(group, name, help=None):
group.add_argument(name.replace('--', ), action='store_true', default=False, help=help)
g1 = parser.add_argument_group('setup')
add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
add_flag(g1, 'ensure', help='ensures that all repos are checked out')
add_flag(g1, 'build', help='builds python packages')
add_flag(g1, 'develop', help='installs packages in developer mode')
add_flag(g1, 'dcnn', help='setup dcnn packages')
g4 = parser.add_argument_group('maintenance')
add_flag(g4, 'pull', help='pulls all IBIES repos')
g3 = parser.add_argument_group('extern')
add_flag(g3, 'no_qt')
add_flag(g3, 'no_gui')
add_flag(g3, 'ignore_opencv')
g2 = parser.add_argument_group('utils')
add_flag(g2, 'move_wildme', help='changes to the wildme repos')
args = parser.parse_args()
return args |
def get_plat_specifier():
'\n Standard platform specifier used by distutils\n '
import setuptools
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ('.%s-%s' % (plat_name, sys.version[0:3]))
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier
| body_hash: 7,519,424,281,882,282,000 | docstring: Standard platform specifier used by distutils | path: super_setup.py | name: get_plat_specifier | repository_name: brmscheiner/ibeis | lang: python |
| body_without_docstring:
def get_plat_specifier():
'\n \n '
import setuptools
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ('.%s-%s' % (plat_name, sys.version[0:3]))
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier |
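A quick usage note for `get_plat_specifier` above: it mirrors the suffix distutils appends to build directories, so the output depends on the interpreter and OS.

```python
# Illustrative, platform-dependent output: e.g. '.linux-x86_64-2.7' on
# 64-bit Linux under Python 2.7, or '.linux-x86_64-3.6-pydebug' for a
# debug build of Python 3.6.
print(get_plat_specifier())
```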
def import_module_from_fpath(module_fpath):
' imports module from a file path '
import platform
from os.path import basename, splitext
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
else:
raise AssertionError('invalid python version')
return module
| body_hash: -8,750,936,844,449,273,000 | docstring: imports module from a file path | path: super_setup.py | name: import_module_from_fpath | repository_name: brmscheiner/ibeis | lang: python |
| body_without_docstring:
def import_module_from_fpath(module_fpath):
' '
import platform
from os.path import basename, splitext
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
else:
raise AssertionError('invalid python version')
return module |
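A short usage sketch of `import_module_from_fpath` from the row above; the path and module name are hypothetical.

```python
# Load a module object directly from a file path (path is illustrative).
mod = import_module_from_fpath('/home/user/code/my_module.py')
print(mod.__name__)   # -> 'my_module'
```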
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
'\n export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"\n set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True\n\n python -c "import pydot; print(pydot.__file__)"\n python -c "import pydot; print(pydot.__version__)"\n python -c "import pydot; print(pydot.find_graphviz())"\n DEVICE="cuda" python -c "import pygpu;pygpu.test()"\n python -c "import theano; print(theano.__file__)"\n # python -c "import pylearn2; print(pylearn2.__file__)"\n python -c "import lasagne; print(lasagne.__file__)"\n python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"\n python -c "import detecttools; print(detecttools.__file__)"\n\n # http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle\n pip install vext.pyqt5\n sudo apt-get install pyqt5-dev\n sudo apt-get install python3-pyqt5\n python\n python -c "import sip; print(\'[test] Python can import sip\')"\n python -c "import sip; print(\'sip.__file__=%r\' % (sip.__file__,))"\n python -c "import sip; print(\'sip.SIP_VERSION=%r\' % (sip.SIP_VERSION,))"\n python -c "import sip; print(\'sip.SIP_VERSION_STR=%r\' % (sip.SIP_VERSION_STR,))"\n\n ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5\n ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/\n ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/\n '
import utool as ut
major = str(sys.version_info.major)
minor = str(sys.version_info.minor)
majorminor = [major, minor]
pyoff = ('2' if (sys.version_info.major == 3) else '3')
pyon = majorminor[0]
plat_spec = get_plat_specifier()
build_dname = ('cmake_builds/build' + plat_spec)
script_fmtdict = {'pyexe': sys.executable, 'pyversion': ('python' + '.'.join(majorminor)), 'pypkg_var': (('PYTHON' + pyon) + '_PACKAGES_PATH'), 'build_dname': build_dname, 'pyoff': pyoff, 'pyon': pyon, 'cv_pyon_var': ('BUILD_opencv_python' + pyon), 'cv_pyoff_var': ('BUILD_opencv_python' + pyoff), 'plat_spec': plat_spec, 'source_dpath': '../..', 'libext': ut.get_lib_ext()}
if (os.environ.get('VIRTUAL_ENV', '') == ''):
if sys.platform.startswith('darwin'):
local_prefix = '/opt/local'
else:
local_prefix = '/usr/local'
else:
local_prefix = os.environ['VIRTUAL_ENV']
opencv_dir = os.path.join(local_prefix, '/share/OpenCV')
if (not os.path.exists(opencv_dir)):
if (not ut.get_argflag('--opencv')):
opencv_dir = ''
print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
print('Running this script with --opencv will build and install it there')
python_bash_setup = ut.codeblock('\n # STARTBLOCK bash\n\n if [[ "$VIRTUAL_ENV" == "" ]]; then\n # The case where we are installying system-wide\n # It is recommended that a virtual enviornment is used instead\n export PYTHON_EXECUTABLE=$(which {pyversion})\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n # Mac system info\n export LOCAL_PREFIX=/opt/local\n export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO="sudo"\n else\n # Linux system info\n export LOCAL_PREFIX=/usr/local\n export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO="sudo"\n fi\n # No windows support here\n else\n # The prefered case where we are in a virtual environment\n export PYTHON_EXECUTABLE=$(which python)\n # export LOCAL_PREFIX=$VIRTUAL_ENV/local\n export LOCAL_PREFIX=$VIRTUAL_ENV\n export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO=""\n fi\n\n echo "LOCAL_PREFIX = $LOCAL_PREFIX"\n echo "{pypkg_var} = ${pypkg_var}"\n # ENDBLOCK bash\n ').format(**script_fmtdict)
script_fmtdict['python_bash_setup'] = python_bash_setup
ibeis_rman['pyflann'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n\n cd {repo_dir}\n mkdir -p {build_dname}\n cd {build_dname}\n\n cmake -G "Unix Makefiles" \\\n -DCMAKE_BUILD_TYPE="Release" \\\n -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \\\n -DBUILD_EXAMPLES=Off \\\n -DBUILD_TESTS=Off \\\n -DBUILD_PYTHON_BINDINGS=On \\\n -DBUILD_MATLAB_BINDINGS=Off \\\n -DBUILD_CUDA_LIB=Off\\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\\\n {source_dpath}\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n\n # ENDBLOCK bash\n ').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict))
ibeis_rman['pyflann'].add_script('install', ut.codeblock('\n # STARTBLOCK bash\n # The pyflann source lives here\n cd {repo_dir}/src/python\n # Need to run build to move the libs to the build directory\n python setup.py build\n # Use pip to editable install\n pip install -e {repo_dir}/src/python\n\n # Old way of doing it\n # But the setup script is generated during build\n # python {repo_dir}/build/src/python/setup.py develop\n\n python -c "import pyflann; print(pyflann.__file__)" --verb-flann\n python -c "import pyflann; print(pyflann)" --verb-flann\n # ENDBLOCK bash\n ').format(repo_dir=ibeis_rman['pyflann'].dpath))
ibeis_rman['hesaff'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/hesaff\n mkdir -p {build_dname}\n cd {build_dname}\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=""\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n cmake -G "Unix Makefiles" \\\n -DCMAKE_OSX_ARCHITECTURES=x86_64 \\\n -DCMAKE_C_COMPILER=clang2 \\\n -DCMAKE_CXX_COMPILER=clang2++ \\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n $OPENCV_ARGS \\\n {source_dpath}\n else\n cmake -G "Unix Makefiles" \\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n $OPENCV_ARGS \\\n {source_dpath}\n fi\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n #make VERBOSE=1\n cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
ibeis_rman['pydarknet'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/pydarknet\n\n mkdir -p {build_dname}\n cd {build_dname}\n\n if [[ "$(which nvcc)" == "" ]]; then\n export CMAKE_CUDA=Off\n else\n export CMAKE_CUDA=On\n fi\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=""\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n else\n export CONFIG="-DCMAKE_BUILD_TYPE=\'Release\' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n fi\n export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"\n echo "CONFIG = $CONFIG"\n\n cmake $CONFIG -G \'Unix Makefiles\' {source_dpath}\n #################################\n echo \'Building with make\'\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS -w\n #################################\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n echo \'Moving the shared library\'\n # cp -v lib* ../pydarknet\n cp -v lib*{libext} {source_dpath}/pydarknet\n # cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
ibeis_rman['pyrf'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/pyrf\n\n mkdir -p {build_dname}\n cd {build_dname}\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=""\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n else\n export CONFIG="-DCMAKE_BUILD_TYPE=\'Release\' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n fi\n echo "CONFIG = $CONFIG"\n\n cmake $CONFIG -G \'Unix Makefiles\' {source_dpath}\n #################################\n echo \'Building with make\'\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS -w\n #################################\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n echo \'Moving the shared library\'\n # cp -v lib* ../pyrf\n cp -v lib*{libext} {source_dpath}/pyrf\n # cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
'\n ./super_setup.py --dump-scripts\n '
tpl_rman['cv2'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n # Checkout opencv core\n cd $CODE_DIR\n # export REPO_DIR=$CODE_DIR/opencv\n export REPO_DIR={repo_dpath}\n # git clone https://github.com/Itseez/opencv.git\n cd $REPO_DIR\n # Checkout opencv extras\n git clone https://github.com/Itseez/opencv_contrib.git\n # cd opencv_contrib\n # git pull\n # cd ..\n # git pull\n mkdir -p $REPO_DIR/{build_dname}\n cd $REPO_DIR/{build_dname}\n\n cmake -G "Unix Makefiles" \\\n -D WITH_OPENMP=ON \\\n -D CMAKE_BUILD_TYPE=RELEASE \\\n -D {cv_pyoff_var}=Off \\\n -D {cv_pyon_var}=On \\\n -D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \\\n -D {pypkg_var}=${pypkg_var} \\\n -D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n -D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \\\n -D WITH_CUDA=Off \\\n -D BUILD_opencv_dnn=Off \\\n -D BUILD_opencv_dnn_modern=Off \\\n -D WITH_VTK=Off \\\n -D WITH_CUDA=Off \\\n -D WITH_MATLAB=Off \\\n $REPO_DIR\n # -D WITH_OPENCL=Off \\\n # -D BUILD_opencv_face=Off \\\n # -D BUILD_opencv_objdetect=Off \\\n # -D BUILD_opencv_video=Off \\\n # -D BUILD_opencv_videoio=Off \\\n # -D BUILD_opencv_videostab=Off \\\n # -D BUILD_opencv_ximgproc=Off \\\n # -D BUILD_opencv_xobjdetect=Off \\\n # -D BUILD_opencv_xphoto=Off \\\n # -D BUILD_opencv_datasets=Off \\\n # -D CXX_FLAGS="-std=c++11" \\ %TODO\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n # ENDBLOCK\n ').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath), **script_fmtdict))
tpl_rman['cv2'].add_script('install', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n\n cd $CODE_DIR/opencv/{build_dname}\n\n $_SUDO make install\n # Hack because cv2 does not want to be installed for some reason\n # cp lib/cv2.so $PYTHON_PACKAGES_PATH\n # Seems to work now that local is removed from prefix\n # cp -v lib/cv2.so $PYTHON_PACKAGES_PATH\n # Test makesure things working\n python -c "import numpy; print(numpy.__file__)"\n python -c "import numpy; print(numpy.__version__)"\n python -c "import cv2; print(cv2.__version__)"\n python -c "import cv2; print(cv2.__file__)"\n #python -c "import vtool"\n # Check if we have contrib modules\n python -c "import cv2; print(cv2.xfeatures2d)"\n # ENDBLOCK\n ').format(**script_fmtdict))
tpl_rman['libgpuarray'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n\n # Ensure the repo was checked out\n if [ ! -d {repo_dpath} ]; then\n git clone https://github.com/Theano/libgpuarray.git {repo_dpath}\n fi\n\n\n {python_bash_setup}\n cd {repo_dpath}\n\n # need a specific version of libgpuarray\n git checkout tags/v0.6.2 -b v0.6.2\n\n mkdir -p {repo_dpath}/{build_dname}\n cd {repo_dpath}/{build_dname}\n\n # First build the C library\n cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n $_SUDO make install\n\n # Now build the python libarary\n cd {repo_dpath}\n python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include\n python setup.py build\n # python setup.py install\n $_SUDO pip install -e {repo_dpath}\n\n # DEVICE="<test device>" python -c "import pygpu;pygpu.test()"\n # DEVICE="gpu0" python -c "import pygpu;pygpu.test()"\n cd ~\n $_SUDO pip install nose\n DEVICE="cuda" python -c "import pygpu;pygpu.test()"\n\n # pip uninstall pygpu\n # ENDBLOCK\n ').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath), **script_fmtdict))
if ut.in_virtual_env():
try:
fmtdict = {'sys_dist_packages': ut.get_global_dist_packages_dir(), 'venv_site_packages': ut.get_site_packages_dir(), 'pyqt': ('PyQt4' if PY2 else 'PyQt5'), 'debian-python-qt': ('python-qt4' if PY2 else 'qt5-default python3-pyqt5 debian-python-qt-svg'), 'pip-python-qt': ('python-qt4' if PY2 else 'python-qt5')}
system_to_venv = ut.codeblock('\n # STARTBLOCK bash\n # Creates a symlink to the global PyQt in a virtual env\n export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"\n export VENV_DIST_PACKAGES="{venv_site_packages}"\n if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then\n echo "have qt"\n ls $GLOBAL_DIST_PACKAGES/{pyqt}\n ls $VENV_DIST_PACKAGES/{pyqt}\n else\n # Ensure PyQt is installed first (FIXME make this work for non-debian systems)\n sudo apt-get install {debian-python-qt}\n # pip install {pip-python-qt}\n fi\n if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then\n # Install system pyqt packages to virtual envirment via symlink\n ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}\n ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/\n ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/\n else\n echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"\n fi\n echo "testing"\n python -c "import {pyqt}; print({pyqt})"\n # ENDBLOCK bash\n ').format(**fmtdict)
tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
except NotImplementedError:
pass
| body_hash: 1,498,355,901,527,397,600
| docstring:
export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True
python -c "import pydot; print(pydot.__file__)"
python -c "import pydot; print(pydot.__version__)"
python -c "import pydot; print(pydot.find_graphviz())"
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
python -c "import theano; print(theano.__file__)"
# python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import lasagne; print(lasagne.__file__)"
python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"
python -c "import detecttools; print(detecttools.__file__)"
# http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
pip install vext.pyqt5
sudo apt-get install pyqt5-dev
sudo apt-get install python3-pyqt5
python
python -c "import sip; print('[test] Python can import sip')"
python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"
ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
| path: super_setup.py | name: define_custom_scripts | repository_name: brmscheiner/ibeis | lang: python |
| body_without_docstring:
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
'\n export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"\n set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True\n\n python -c "import pydot; print(pydot.__file__)"\n python -c "import pydot; print(pydot.__version__)"\n python -c "import pydot; print(pydot.find_graphviz())"\n DEVICE="cuda" python -c "import pygpu;pygpu.test()"\n python -c "import theano; print(theano.__file__)"\n # python -c "import pylearn2; print(pylearn2.__file__)"\n python -c "import lasagne; print(lasagne.__file__)"\n python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"\n python -c "import detecttools; print(detecttools.__file__)"\n\n # http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle\n pip install vext.pyqt5\n sudo apt-get install pyqt5-dev\n sudo apt-get install python3-pyqt5\n python\n python -c "import sip; print(\'[test] Python can import sip\')"\n python -c "import sip; print(\'sip.__file__=%r\' % (sip.__file__,))"\n python -c "import sip; print(\'sip.SIP_VERSION=%r\' % (sip.SIP_VERSION,))"\n python -c "import sip; print(\'sip.SIP_VERSION_STR=%r\' % (sip.SIP_VERSION_STR,))"\n\n ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5\n ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/\n ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/\n '
import utool as ut
major = str(sys.version_info.major)
minor = str(sys.version_info.minor)
majorminor = [major, minor]
pyoff = ('2' if (sys.version_info.major == 3) else '3')
pyon = majorminor[0]
plat_spec = get_plat_specifier()
build_dname = ('cmake_builds/build' + plat_spec)
script_fmtdict = {'pyexe': sys.executable, 'pyversion': ('python' + '.'.join(majorminor)), 'pypkg_var': (('PYTHON' + pyon) + '_PACKAGES_PATH'), 'build_dname': build_dname, 'pyoff': pyoff, 'pyon': pyon, 'cv_pyon_var': ('BUILD_opencv_python' + pyon), 'cv_pyoff_var': ('BUILD_opencv_python' + pyoff), 'plat_spec': plat_spec, 'source_dpath': '../..', 'libext': ut.get_lib_ext()}
if (os.environ.get('VIRTUAL_ENV', ) == ):
if sys.platform.startswith('darwin'):
local_prefix = '/opt/local'
else:
local_prefix = '/usr/local'
else:
local_prefix = os.environ['VIRTUAL_ENV']
opencv_dir = os.path.join(local_prefix, '/share/OpenCV')
if (not os.path.exists(opencv_dir)):
if (not ut.get_argflag('--opencv')):
opencv_dir =
print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
print('Running this script with --opencv will build and install it there')
python_bash_setup = ut.codeblock('\n # STARTBLOCK bash\n\n if [[ "$VIRTUAL_ENV" == ]]; then\n # The case where we are installying system-wide\n # It is recommended that a virtual enviornment is used instead\n export PYTHON_EXECUTABLE=$(which {pyversion})\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n # Mac system info\n export LOCAL_PREFIX=/opt/local\n export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO="sudo"\n else\n # Linux system info\n export LOCAL_PREFIX=/usr/local\n export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO="sudo"\n fi\n # No windows support here\n else\n # The prefered case where we are in a virtual environment\n export PYTHON_EXECUTABLE=$(which python)\n # export LOCAL_PREFIX=$VIRTUAL_ENV/local\n export LOCAL_PREFIX=$VIRTUAL_ENV\n export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages\n export PYTHON_PACKAGES_PATH=${pypkg_var}\n export _SUDO=\n fi\n\n echo "LOCAL_PREFIX = $LOCAL_PREFIX"\n echo "{pypkg_var} = ${pypkg_var}"\n # ENDBLOCK bash\n ').format(**script_fmtdict)
script_fmtdict['python_bash_setup'] = python_bash_setup
ibeis_rman['pyflann'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n\n cd {repo_dir}\n mkdir -p {build_dname}\n cd {build_dname}\n\n cmake -G "Unix Makefiles" \\\n -DCMAKE_BUILD_TYPE="Release" \\\n -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \\\n -DBUILD_EXAMPLES=Off \\\n -DBUILD_TESTS=Off \\\n -DBUILD_PYTHON_BINDINGS=On \\\n -DBUILD_MATLAB_BINDINGS=Off \\\n -DBUILD_CUDA_LIB=Off\\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\\\n {source_dpath}\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n\n # ENDBLOCK bash\n ').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict))
ibeis_rman['pyflann'].add_script('install', ut.codeblock('\n # STARTBLOCK bash\n # The pyflann source lives here\n cd {repo_dir}/src/python\n # Need to run build to move the libs to the build directory\n python setup.py build\n # Use pip to editable install\n pip install -e {repo_dir}/src/python\n\n # Old way of doing it\n # But the setup script is generated during build\n # python {repo_dir}/build/src/python/setup.py develop\n\n python -c "import pyflann; print(pyflann.__file__)" --verb-flann\n python -c "import pyflann; print(pyflann)" --verb-flann\n # ENDBLOCK bash\n ').format(repo_dir=ibeis_rman['pyflann'].dpath))
ibeis_rman['hesaff'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/hesaff\n mkdir -p {build_dname}\n cd {build_dname}\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n cmake -G "Unix Makefiles" \\\n -DCMAKE_OSX_ARCHITECTURES=x86_64 \\\n -DCMAKE_C_COMPILER=clang2 \\\n -DCMAKE_CXX_COMPILER=clang2++ \\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n $OPENCV_ARGS \\\n {source_dpath}\n else\n cmake -G "Unix Makefiles" \\\n -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n $OPENCV_ARGS \\\n {source_dpath}\n fi\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n #make VERBOSE=1\n cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
ibeis_rman['pydarknet'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/pydarknet\n\n mkdir -p {build_dname}\n cd {build_dname}\n\n if [[ "$(which nvcc)" == ]]; then\n export CMAKE_CUDA=Off\n else\n export CMAKE_CUDA=On\n fi\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n else\n export CONFIG="-DCMAKE_BUILD_TYPE=\'Release\' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n fi\n export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"\n echo "CONFIG = $CONFIG"\n\n cmake $CONFIG -G \'Unix Makefiles\' {source_dpath}\n #################################\n echo \'Building with make\'\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS -w\n #################################\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n echo \'Moving the shared library\'\n # cp -v lib* ../pydarknet\n cp -v lib*{libext} {source_dpath}/pydarknet\n # cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
ibeis_rman['pyrf'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n cd $CODE_DIR/pyrf\n\n mkdir -p {build_dname}\n cd {build_dname}\n\n # only specify an explicit opencv directory if we know one exists\n if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then\n OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"\n else\n OPENCV_ARGS=\n fi\n\n echo \'Configuring with cmake\'\n if [[ \'$OSTYPE\' == \'darwin\'* ]]; then\n export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n else\n export CONFIG="-DCMAKE_BUILD_TYPE=\'Release\' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"\n fi\n echo "CONFIG = $CONFIG"\n\n cmake $CONFIG -G \'Unix Makefiles\' {source_dpath}\n #################################\n echo \'Building with make\'\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS -w\n #################################\n\n export MAKE_EXITCODE=$?\n echo "MAKE_EXITCODE=$MAKE_EXITCODE"\n\n # Move the compiled library into the source folder\n if [[ $MAKE_EXITCODE == 0 ]]; then\n echo \'Moving the shared library\'\n # cp -v lib* ../pyrf\n cp -v lib*{libext} {source_dpath}/pyrf\n # cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}\n fi\n\n # ENDBLOCK\n ').format(**script_fmtdict))
'\n ./super_setup.py --dump-scripts\n '
tpl_rman['cv2'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n # Checkout opencv core\n cd $CODE_DIR\n # export REPO_DIR=$CODE_DIR/opencv\n export REPO_DIR={repo_dpath}\n # git clone https://github.com/Itseez/opencv.git\n cd $REPO_DIR\n # Checkout opencv extras\n git clone https://github.com/Itseez/opencv_contrib.git\n # cd opencv_contrib\n # git pull\n # cd ..\n # git pull\n mkdir -p $REPO_DIR/{build_dname}\n cd $REPO_DIR/{build_dname}\n\n cmake -G "Unix Makefiles" \\\n -D WITH_OPENMP=ON \\\n -D CMAKE_BUILD_TYPE=RELEASE \\\n -D {cv_pyoff_var}=Off \\\n -D {cv_pyon_var}=On \\\n -D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \\\n -D {pypkg_var}=${pypkg_var} \\\n -D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \\\n -D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \\\n -D WITH_CUDA=Off \\\n -D BUILD_opencv_dnn=Off \\\n -D BUILD_opencv_dnn_modern=Off \\\n -D WITH_VTK=Off \\\n -D WITH_CUDA=Off \\\n -D WITH_MATLAB=Off \\\n $REPO_DIR\n # -D WITH_OPENCL=Off \\\n # -D BUILD_opencv_face=Off \\\n # -D BUILD_opencv_objdetect=Off \\\n # -D BUILD_opencv_video=Off \\\n # -D BUILD_opencv_videoio=Off \\\n # -D BUILD_opencv_videostab=Off \\\n # -D BUILD_opencv_ximgproc=Off \\\n # -D BUILD_opencv_xobjdetect=Off \\\n # -D BUILD_opencv_xphoto=Off \\\n # -D BUILD_opencv_datasets=Off \\\n # -D CXX_FLAGS="-std=c++11" \\ %TODO\n\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n # ENDBLOCK\n ').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath), **script_fmtdict))
tpl_rman['cv2'].add_script('install', ut.codeblock('\n # STARTBLOCK bash\n {python_bash_setup}\n\n cd $CODE_DIR/opencv/{build_dname}\n\n $_SUDO make install\n # Hack because cv2 does not want to be installed for some reason\n # cp lib/cv2.so $PYTHON_PACKAGES_PATH\n # Seems to work now that local is removed from prefix\n # cp -v lib/cv2.so $PYTHON_PACKAGES_PATH\n # Test makesure things working\n python -c "import numpy; print(numpy.__file__)"\n python -c "import numpy; print(numpy.__version__)"\n python -c "import cv2; print(cv2.__version__)"\n python -c "import cv2; print(cv2.__file__)"\n #python -c "import vtool"\n # Check if we have contrib modules\n python -c "import cv2; print(cv2.xfeatures2d)"\n # ENDBLOCK\n ').format(**script_fmtdict))
tpl_rman['libgpuarray'].add_script('build', ut.codeblock('\n # STARTBLOCK bash\n\n # Ensure the repo was checked out\n if [ ! -d {repo_dpath} ]; then\n git clone https://github.com/Theano/libgpuarray.git {repo_dpath}\n fi\n\n\n {python_bash_setup}\n cd {repo_dpath}\n\n # need a specific version of libgpuarray\n git checkout tags/v0.6.2 -b v0.6.2\n\n mkdir -p {repo_dpath}/{build_dname}\n cd {repo_dpath}/{build_dname}\n\n # First build the C library\n cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\n export NCPUS=$(grep -c ^processor /proc/cpuinfo)\n make -j$NCPUS\n $_SUDO make install\n\n # Now build the python libarary\n cd {repo_dpath}\n python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include\n python setup.py build\n # python setup.py install\n $_SUDO pip install -e {repo_dpath}\n\n # DEVICE="<test device>" python -c "import pygpu;pygpu.test()"\n # DEVICE="gpu0" python -c "import pygpu;pygpu.test()"\n cd ~\n $_SUDO pip install nose\n DEVICE="cuda" python -c "import pygpu;pygpu.test()"\n\n # pip uninstall pygpu\n # ENDBLOCK\n ').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath), **script_fmtdict))
if ut.in_virtual_env():
try:
fmtdict = {'sys_dist_packages': ut.get_global_dist_packages_dir(), 'venv_site_packages': ut.get_site_packages_dir(), 'pyqt': ('PyQt4' if PY2 else 'PyQt5'), 'debian-python-qt': ('python-qt4' if PY2 else 'qt5-default python3-pyqt5 debian-python-qt-svg'), 'pip-python-qt': ('python-qt4' if PY2 else 'python-qt5')}
system_to_venv = ut.codeblock('\n # STARTBLOCK bash\n # Creates a symlink to the global PyQt in a virtual env\n export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"\n export VENV_DIST_PACKAGES="{venv_site_packages}"\n if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then\n echo "have qt"\n ls $GLOBAL_DIST_PACKAGES/{pyqt}\n ls $VENV_DIST_PACKAGES/{pyqt}\n else\n # Ensure PyQt is installed first (FIXME make this work for non-debian systems)\n sudo apt-get install {debian-python-qt}\n # pip install {pip-python-qt}\n fi\n if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then\n # Install system pyqt packages to virtual envirment via symlink\n ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}\n ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/\n ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/\n else\n echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"\n fi\n echo "testing"\n python -c "import {pyqt}; print({pyqt})"\n # ENDBLOCK bash\n ').format(**fmtdict)
tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
except NotImplementedError:
pass |
def is_running_as_root():
'\n References:\n http://stackoverflow.com/questions/5721529/running-python-script-as-root\n http://stackoverflow.com/questions/2806897/checking-script-has-root\n '
return (os.getenv('USER') == 'root')
| body_hash: 5,363,990,343,443,253,000
| docstring:
References:
http://stackoverflow.com/questions/5721529/running-python-script-as-root
http://stackoverflow.com/questions/2806897/checking-script-has-root
| path: super_setup.py | name: is_running_as_root | repository_name: brmscheiner/ibeis | lang: python |
| body_without_docstring:
def is_running_as_root():
'\n References:\n http://stackoverflow.com/questions/5721529/running-python-script-as-root\n http://stackoverflow.com/questions/2806897/checking-script-has-root\n '
return (os.getenv('USER') == 'root') |
async def m001_initial(db):
'\n Initial wallet table.\n '
(await db.execute("\n CREATE TABLE IF NOT EXISTS charges (\n id TEXT NOT NULL PRIMARY KEY,\n user TEXT,\n description TEXT,\n onchainwallet TEXT,\n onchainaddress TEXT,\n lnbitswallet TEXT,\n payment_request TEXT,\n payment_hash TEXT,\n webhook TEXT,\n completelink TEXT,\n completelinktext TEXT,\n time INTEGER,\n amount INTEGER,\n balance INTEGER DEFAULT 0,\n timestamp TIMESTAMP NOT NULL DEFAULT (strftime('%s', 'now'))\n );\n ")) | 2,438,347,391,264,433,700 | Initial wallet table. | lnbits/extensions/satspay/migrations.py | m001_initial | bliotti/lnbits | python | async def m001_initial(db):
'\n \n '
(await db.execute("\n CREATE TABLE IF NOT EXISTS charges (\n id TEXT NOT NULL PRIMARY KEY,\n user TEXT,\n description TEXT,\n onchainwallet TEXT,\n onchainaddress TEXT,\n lnbitswallet TEXT,\n payment_request TEXT,\n payment_hash TEXT,\n webhook TEXT,\n completelink TEXT,\n completelinktext TEXT,\n time INTEGER,\n amount INTEGER,\n balance INTEGER DEFAULT 0,\n timestamp TIMESTAMP NOT NULL DEFAULT (strftime('%s', 'now'))\n );\n ")) |
def check_auth(args):
"\n Checks courseraprogramming's connectivity to the coursera.org API servers\n "
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = 'https://api.coursera.org/api/externalBasicProfiles.v1?q=me&fields=name'
r = requests.get(my_profile_url, auth=auth)
if (r.status_code != 200):
logging.error('Received response code %s from the basic profile API.', r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
except:
logging.error('Could not parse the external id out of the response body %s', r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
except:
logging.error('Could not parse the name out of the response body %s', r.text)
name = None
if ((not args.quiet) or (args.quiet == 0)):
print(('Name: %s' % name))
print(('External ID: %s' % external_id))
if ((name is None) or (external_id is None)):
sys.exit(1)
| body_hash: -6,796,528,488,771,106,000 | docstring: Checks courseraprogramming's connectivity to the coursera.org API servers | path: courseraprogramming/commands/config.py | name: check_auth | repository_name: andres-zartab/courseraprogramming | lang: python |
| body_without_docstring:
def check_auth(args):
"\n \n "
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = 'https://api.coursera.org/api/externalBasicProfiles.v1?q=me&fields=name'
r = requests.get(my_profile_url, auth=auth)
if (r.status_code != 200):
logging.error('Received response code %s from the basic profile API.', r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
except:
logging.error('Could not parse the external id out of the response body %s', r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
except:
logging.error('Could not parse the name out of the response body %s', r.text)
name = None
if ((not args.quiet) or (args.quiet == 0)):
print(('Name: %s' % name))
print(('External ID: %s' % external_id))
if ((name is None) or (external_id is None)):
sys.exit(1) |
def display_auth_cache(args):
'\n Writes to the screen the state of the authentication cache. (For debugging\n authentication issues.) BEWARE: DO NOT email the output of this command!!!\n You must keep the tokens secure. Treat them as passwords.\n '
oauth2_instance = oauth2.build_oauth2(args)
if ((not args.quiet) or (args.quiet == 0)):
token = oauth2_instance.token_cache['token']
if ((not args.no_truncate) and (token is not None)):
token = (token[:10] + '...')
print(('Auth token: %s' % token))
expires_time = oauth2_instance.token_cache['expires']
expires_in = (int(((expires_time - time.time()) * 10)) / 10.0)
print(('Auth token expires in: %s seconds.' % expires_in))
if ('refresh' in oauth2_instance.token_cache):
refresh = oauth2_instance.token_cache['refresh']
if ((not args.no_truncate) and (refresh is not None)):
refresh = (refresh[:10] + '...')
print(('Refresh token: %s' % refresh))
else:
print('No refresh token found.')
| body_hash: -7,257,325,178,374,705,000
| docstring:
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
| path: courseraprogramming/commands/config.py | name: display_auth_cache | repository_name: andres-zartab/courseraprogramming | lang: python |
| body_without_docstring:
def display_auth_cache(args):
'\n Writes to the screen the state of the authentication cache. (For debugging\n authentication issues.) BEWARE: DO NOT email the output of this command!!!\n You must keep the tokens secure. Treat them as passwords.\n '
oauth2_instance = oauth2.build_oauth2(args)
if ((not args.quiet) or (args.quiet == 0)):
token = oauth2_instance.token_cache['token']
if ((not args.no_truncate) and (token is not None)):
token = (token[:10] + '...')
print(('Auth token: %s' % token))
expires_time = oauth2_instance.token_cache['expires']
expires_in = (int(((expires_time - time.time()) * 10)) / 10.0)
print(('Auth token expires in: %s seconds.' % expires_in))
if ('refresh' in oauth2_instance.token_cache):
refresh = oauth2_instance.token_cache['refresh']
if ((not args.no_truncate) and (refresh is not None)):
refresh = (refresh[:10] + '...')
print(('Refresh token: %s' % refresh))
else:
print('No refresh token found.') |
def parser(subparsers):
'Build an argparse argument parser to parse the command line.'
parser_config = subparsers.add_parser('configure', help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
parser_check_auth = config_subparsers.add_parser('check-auth', help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser('display-auth-cache', help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument('--no-truncate', action='store_true', help='Do not truncate the keys [DANGER!!]')
return parser_config
| body_hash: -8,423,254,587,157,504,000 | docstring: Build an argparse argument parser to parse the command line. | path: courseraprogramming/commands/config.py | name: parser | repository_name: andres-zartab/courseraprogramming | lang: python |
| body_without_docstring:
def parser(subparsers):
parser_config = subparsers.add_parser('configure', help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
parser_check_auth = config_subparsers.add_parser('check-auth', help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser('display-auth-cache', help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument('--no-truncate', action='store_true', help='Do not truncate the keys [DANGER!!]')
return parser_config |
def forward(self, img):
'\n Your code here\n Predict the aim point in image coordinate, given the supertuxkart image\n @img: (B,3,96,128)\n return (B,2)\n '
x = self._conv(img)
return spatial_argmax(x[:, 0])
| body_hash: 5,731,895,734,911,100,000
| docstring:
Your code here
Predict the aim point in image coordinate, given the supertuxkart image
@img: (B,3,96,128)
return (B,2)
| path: planner/regressor/models.py | name: forward | repository_name: aljubrmj/CS342-Final-Project | lang: python |
| body_without_docstring:
def forward(self, img):
'\n Your code here\n Predict the aim point in image coordinate, given the supertuxkart image\n @img: (B,3,96,128)\n return (B,2)\n '
x = self._conv(img)
return spatial_argmax(x[:, 0]) |
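
A usage sketch, assuming PyTorch is available and that the forward() above belongs to a planner module; the class name Planner is an assumption, not taken from this record.

import torch

frames = torch.rand(4, 3, 96, 128)   # batch of dummy SuperTuxKart frames, shape (B, 3, 96, 128)
planner = Planner()                  # assumed nn.Module wrapping the forward() shown above
aim_points = planner(frames)         # -> tensor of shape (4, 2) via spatial_argmax over the heatmap
print(aim_points.shape)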
def make_state_space_controller(Phi_x, Phi_u, n, p):
'\n Converts FIR transfer functions to a state\n space realization of the dynamic controller,\n mapping states to inputs.\n\n '
assert (len(Phi_x.shape) == 2)
assert (len(Phi_u.shape) == 2)
assert (Phi_x.shape[1] == n)
assert (Phi_u.shape[1] == n)
(nT, _) = Phi_x.shape
(pT, _) = Phi_u.shape
assert ((nT % n) == 0)
assert ((pT % p) == 0)
T = (nT // n)
assert (T == (pT // p))
Z = np.diag(np.ones((n * (T - 2))), k=(- n))
assert (Z.shape == (((T - 1) * n), ((T - 1) * n)))
calI = np.zeros(((n * (T - 1)), n))
calI[:n, :] = np.eye(n)
Rhat = np.hstack([Phi_x[(n * k):(n * (k + 1)), :] for k in range(1, T)])
Mhat = np.hstack([Phi_u[(p * k):(p * (k + 1)), :] for k in range(1, T)])
M1 = Phi_u[:p, :]
R1 = Phi_x[:n, :]
A = (Z - calI.dot(Rhat))
B = (- calI)
C = (M1.dot(Rhat) - Mhat)
D = M1
return (A, B, C, D) | -5,705,819,204,201,010,000 | Converts FIR transfer functions to a state
space realization of the dynamic controller,
mapping states to inputs. | python/sls.py | make_state_space_controller | DuttaAbhigyan/robust-adaptive-lqr | python | def make_state_space_controller(Phi_x, Phi_u, n, p):
'\n Converts FIR transfer functions to a state\n space realization of the dynamic controller,\n mapping states to inputs.\n\n '
assert (len(Phi_x.shape) == 2)
assert (len(Phi_u.shape) == 2)
assert (Phi_x.shape[1] == n)
assert (Phi_u.shape[1] == n)
(nT, _) = Phi_x.shape
(pT, _) = Phi_u.shape
assert ((nT % n) == 0)
assert ((pT % p) == 0)
T = (nT // n)
assert (T == (pT // p))
Z = np.diag(np.ones((n * (T - 2))), k=(- n))
assert (Z.shape == (((T - 1) * n), ((T - 1) * n)))
calI = np.zeros(((n * (T - 1)), n))
calI[:n, :] = np.eye(n)
Rhat = np.hstack([Phi_x[(n * k):(n * (k + 1)), :] for k in range(1, T)])
Mhat = np.hstack([Phi_u[(p * k):(p * (k + 1)), :] for k in range(1, T)])
M1 = Phi_u[:p, :]
R1 = Phi_x[:n, :]
A = (Z - calI.dot(Rhat))
B = (- calI)
C = (M1.dot(Rhat) - Mhat)
D = M1
return (A, B, C, D) |
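
A shape-level sketch of the conversion above, assuming numpy; the Phi matrices here are random placeholders to illustrate the expected stacking, not a valid SLS solution.

import numpy as np

n, p, T = 3, 2, 5                  # state dim, input dim, FIR horizon
Phi_x = np.random.randn(T * n, n)  # placeholder stacked system responses {Phi_x[k]}
Phi_u = np.random.randn(T * p, n)  # placeholder stacked input responses {Phi_u[k]}

A_k, B_k, C_k, D_k = make_state_space_controller(Phi_x, Phi_u, n, p)
# Controller realization: psi_{t+1} = A_k psi_t + B_k x_t,  u_t = C_k psi_t + D_k x_t
print(A_k.shape, B_k.shape, C_k.shape, D_k.shape)   # (12, 12) (12, 3) (2, 12) (2, 3)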
def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
'\n Gets the squared infinite horizon LQR cost for system\n (A,B) in feedback with the controller defined by Phi_x\n and Phi_u. \n\n '
(n, p) = B.shape
(A_k, B_k, C_k, D_k) = make_state_space_controller(Phi_x, Phi_u, n, p)
A_cl = np.block([[(A + B.dot(D_k)), B.dot(C_k)], [B_k, A_k]])
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
C_cl = np.block([[Q_sqrt, np.zeros((n, A_k.shape[0]))], [R_sqrt.dot(D_k), R_sqrt.dot(C_k)]])
B_cl = np.vstack((np.eye(n), np.zeros((A_k.shape[0], n))))
P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
return ((sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))) | 702,728,122,757,241,200 | Gets the squared infinite horizon LQR cost for system
(A,B) in feedback with the controller defined by Phi_x
and Phi_u. | python/sls.py | h2_squared_norm | DuttaAbhigyan/robust-adaptive-lqr | python | def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
'\n Gets the squared infinite horizon LQR cost for system\n (A,B) in feedback with the controller defined by Phi_x\n and Phi_u. \n\n '
(n, p) = B.shape
(A_k, B_k, C_k, D_k) = make_state_space_controller(Phi_x, Phi_u, n, p)
A_cl = np.block([[(A + B.dot(D_k)), B.dot(C_k)], [B_k, A_k]])
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
C_cl = np.block([[Q_sqrt, np.zeros((n, A_k.shape[0]))], [R_sqrt.dot(D_k), R_sqrt.dot(C_k)]])
B_cl = np.vstack((np.eye(n), np.zeros((A_k.shape[0], n))))
P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
return ((sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))) |
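
A self-contained toy check, assuming numpy and the module's utils helpers: for the scalar plant x_{t+1} = 0.5 x_t + u_t + w_t with the "zero input" responses Phi_u = 0 and Phi_x[k] = 0.5^k, the controller built from these filters applies no control, so the cost should come out near sum_k 0.25^k = 4/3.

import numpy as np

A = np.array([[0.5]])
B = np.array([[1.0]])
Q = np.eye(1)
R = np.eye(1)
T = 15
Phi_x = np.vstack([np.array([[0.5 ** k]]) for k in range(T)])  # x response to w
Phi_u = np.zeros((T, 1))                                       # u response to w (no control)
print(h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w=1.0))  # ~1.333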
def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
'Apply an LTI controller K = (A_k,B_k,C_k,D_k)\n\n Roll the true system (A, B) forward with the SS realization of the LTI\n controller given. horizon is the length of the trajectory, and\n sigma_w is the stddev of the Gaussian process noise.\n\n '
if (rng is None):
rng = np.random
_assert_AB_consistent(A, B)
(A_k, B_k, C_k, D_k) = K
_assert_ABCD_consistent(A_k, B_k, C_k, D_k)
(state_dim, input_dim) = B.shape
psi_dim = A_k.shape[0]
assert (C_k.shape[0] == input_dim)
assert (B_k.shape[1] == state_dim)
if (x0 is None):
x0 = np.zeros((state_dim,))
if (psi0 is None):
psi0 = np.zeros((psi_dim,))
assert (x0.shape == (state_dim,))
assert (psi0.shape == (psi_dim,))
process = (sigma_w * rng.normal(size=(horizon, state_dim)))
xt = np.array(x0)
psit = np.array(psi0)
states = np.zeros(((horizon + 1), state_dim))
inputs = np.zeros((horizon, input_dim))
controller_states = np.zeros(((horizon + 1), psi_dim))
states[0, :] = x0
controller_states[0, :] = psi0
for t in range(horizon):
psitp1 = (A_k.dot(psit) + B_k.dot(xt))
ut = (C_k.dot(psit) + D_k.dot(xt))
xtp1 = ((A.dot(xt) + B.dot(ut)) + process[t])
inputs[t, :] = ut
states[(t + 1), :] = xtp1
controller_states[(t + 1), :] = psitp1
xt = xtp1
psit = psitp1
return (states, inputs, controller_states) | -8,283,106,643,702,540,000 | Apply an LTI controller K = (A_k,B_k,C_k,D_k)
Roll the true system (A, B) forward with the SS realization of the LTI
controller given. horizon is the length of the trajectory, and
sigma_w is the stddev of the Gaussian process noise. | python/sls.py | roll_forward | DuttaAbhigyan/robust-adaptive-lqr | python | def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
'Apply an LTI controller K = (A_k,B_k,C_k,D_k)\n\n Roll the true system (A, B) forward with the SS realization of the LTI\n controller given. horizon is the length of the trajectory, and\n sigma_w is the stddev of the Gaussian process noise.\n\n '
if (rng is None):
rng = np.random
_assert_AB_consistent(A, B)
(A_k, B_k, C_k, D_k) = K
_assert_ABCD_consistent(A_k, B_k, C_k, D_k)
(state_dim, input_dim) = B.shape
psi_dim = A_k.shape[0]
assert (C_k.shape[0] == input_dim)
assert (B_k.shape[1] == state_dim)
if (x0 is None):
x0 = np.zeros((state_dim,))
if (psi0 is None):
psi0 = np.zeros((psi_dim,))
assert (x0.shape == (state_dim,))
assert (psi0.shape == (psi_dim,))
process = (sigma_w * rng.normal(size=(horizon, state_dim)))
xt = np.array(x0)
psit = np.array(psi0)
states = np.zeros(((horizon + 1), state_dim))
inputs = np.zeros((horizon, input_dim))
controller_states = np.zeros(((horizon + 1), psi_dim))
states[0, :] = x0
controller_states[0, :] = psi0
for t in range(horizon):
psitp1 = (A_k.dot(psit) + B_k.dot(xt))
ut = (C_k.dot(psit) + D_k.dot(xt))
xtp1 = ((A.dot(xt) + B.dot(ut)) + process[t])
inputs[t, :] = ut
states[(t + 1), :] = xtp1
controller_states[(t + 1), :] = psitp1
xt = xtp1
psit = psitp1
return (states, inputs, controller_states) |
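
A rollout sketch reusing the same toy plant and zero-input FIR controller, assuming numpy and the helpers above.

import numpy as np

A = np.array([[0.5]])
B = np.array([[1.0]])
T = 15
Phi_x = np.vstack([np.array([[0.5 ** k]]) for k in range(T)])
Phi_u = np.zeros((T, 1))
K = make_state_space_controller(Phi_x, Phi_u, 1, 1)

states, inputs, _ = roll_forward(A, B, K, x0=None, psi0=None, sigma_w=1.0,
                                 horizon=100, rng=np.random.RandomState(0))
print(states.shape, inputs.shape)   # (101, 1) (100, 1)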
def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
'\n Solves the SLS synthesis problem for length T FIR filters\n using CVXPY\n\n '
assert ((len(Q.shape) == 2) and (Q.shape[0] == Q.shape[1]))
assert ((len(R.shape) == 2) and (R.shape[0] == R.shape[1]))
assert ((len(Ahat.shape) == 2) and (Ahat.shape[0] == Ahat.shape[1]))
assert ((len(Bhat.shape) == 2) and (Bhat.shape[0] == Ahat.shape[0]))
assert (Q.shape[0] == Ahat.shape[0])
assert (R.shape[0] == Bhat.shape[1])
assert (eps_A >= 0)
assert (eps_B >= 0)
assert (T >= 1)
assert ((gamma > 0) and (gamma < 1))
assert ((alpha > 0) and (alpha < 1))
if (logger is None):
logger = logging.getLogger(__name__)
(n, p) = Bhat.shape
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
Phi_x = cvx.Variable((T * n), n, name='Phi_x')
Phi_u = cvx.Variable((T * p), n, name='Phi_u')
htwo_cost = cvx.Variable(name='htwo_cost')
constr = []
constr.append((Phi_x[:n, :] == np.eye(n)))
for k in range((T - 1)):
constr.append((Phi_x[(n * (k + 1)):(n * ((k + 1) + 1)), :] == ((Ahat * Phi_x[(n * k):(n * (k + 1)), :]) + (Bhat * Phi_u[(p * k):(p * (k + 1)), :]))))
constr.append((((Ahat * Phi_x[(n * (T - 1)):, :]) + (Bhat * Phi_u[(p * (T - 1)):, :])) == 0))
constr.append((cvx.norm(cvx.bmat(([[(Q_sqrt * Phi_x[(n * k):(n * (k + 1)), :])] for k in range(T)] + [[(R_sqrt * Phi_u[(p * k):(p * (k + 1)), :])] for k in range(T)])), 'fro') <= htwo_cost))
mult_x = (eps_A / np.sqrt(alpha))
mult_u = (eps_B / np.sqrt((1 - alpha)))
Hbar = cvx.bmat(([[np.zeros((n, n)), np.zeros((n, p))]] + [[(mult_x * Phi_x[(n * k):(n * (k + 1)), :].T), (mult_u * Phi_u[(p * k):(p * (k + 1)), :].T)] for k in range(T)]))
Q = cvx.Semidef((n * (T + 1)), name='Q')
gamma_sq = (gamma ** 2)
constr.append((sum([Q[(n * t):(n * (t + 1)), (n * t):(n * (t + 1))] for t in range((T + 1))]) == (gamma_sq * np.eye(n))))
for k in range(1, (T + 1)):
constr.append((sum([Q[(n * t):(n * (t + 1)), (n * (t + k)):(n * ((t + 1) + k))] for t in range(((T + 1) - k))]) == np.zeros((n, n))))
constr.append((cvx.bmat([[Q, Hbar], [Hbar.T, np.eye((n + p))]]) == cvx.Semidef(((n * (T + 1)) + (n + p)))))
prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
prob.solve(solver=cvx.SCS)
if (prob.status == cvx.OPTIMAL):
logging.debug('successfully solved!')
Phi_x = np.array(Phi_x.value)
Phi_u = np.array(Phi_u.value)
return (True, prob.value, Phi_x, Phi_u)
else:
logging.debug('could not solve: {}'.format(prob.status))
return (False, None, None, None) | -6,528,772,309,714,675,000 | Solves the SLS synthesis problem for length T FIR filters
using CVXPY | python/sls.py | sls_synth | DuttaAbhigyan/robust-adaptive-lqr | python | def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
'\n Solves the SLS synthesis problem for length T FIR filters\n using CVXPY\n\n '
assert ((len(Q.shape) == 2) and (Q.shape[0] == Q.shape[1]))
assert ((len(R.shape) == 2) and (R.shape[0] == R.shape[1]))
assert ((len(Ahat.shape) == 2) and (Ahat.shape[0] == Ahat.shape[1]))
assert ((len(Bhat.shape) == 2) and (Bhat.shape[0] == Ahat.shape[0]))
assert (Q.shape[0] == Ahat.shape[0])
assert (R.shape[0] == Bhat.shape[1])
assert (eps_A >= 0)
assert (eps_B >= 0)
assert (T >= 1)
assert ((gamma > 0) and (gamma < 1))
assert ((alpha > 0) and (alpha < 1))
if (logger is None):
logger = logging.getLogger(__name__)
(n, p) = Bhat.shape
Q_sqrt = utils.psd_sqrt(Q)
R_sqrt = utils.psd_sqrt(R)
Phi_x = cvx.Variable((T * n), n, name='Phi_x')
Phi_u = cvx.Variable((T * p), n, name='Phi_u')
htwo_cost = cvx.Variable(name='htwo_cost')
constr = []
constr.append((Phi_x[:n, :] == np.eye(n)))
for k in range((T - 1)):
constr.append((Phi_x[(n * (k + 1)):(n * ((k + 1) + 1)), :] == ((Ahat * Phi_x[(n * k):(n * (k + 1)), :]) + (Bhat * Phi_u[(p * k):(p * (k + 1)), :]))))
constr.append((((Ahat * Phi_x[(n * (T - 1)):, :]) + (Bhat * Phi_u[(p * (T - 1)):, :])) == 0))
constr.append((cvx.norm(cvx.bmat(([[(Q_sqrt * Phi_x[(n * k):(n * (k + 1)), :])] for k in range(T)] + [[(R_sqrt * Phi_u[(p * k):(p * (k + 1)), :])] for k in range(T)])), 'fro') <= htwo_cost))
mult_x = (eps_A / np.sqrt(alpha))
mult_u = (eps_B / np.sqrt((1 - alpha)))
Hbar = cvx.bmat(([[np.zeros((n, n)), np.zeros((n, p))]] + [[(mult_x * Phi_x[(n * k):(n * (k + 1)), :].T), (mult_u * Phi_u[(p * k):(p * (k + 1)), :].T)] for k in range(T)]))
Q = cvx.Semidef((n * (T + 1)), name='Q')
gamma_sq = (gamma ** 2)
constr.append((sum([Q[(n * t):(n * (t + 1)), (n * t):(n * (t + 1))] for t in range((T + 1))]) == (gamma_sq * np.eye(n))))
for k in range(1, (T + 1)):
constr.append((sum([Q[(n * t):(n * (t + 1)), (n * (t + k)):(n * ((t + 1) + k))] for t in range(((T + 1) - k))]) == np.zeros((n, n))))
constr.append((cvx.bmat([[Q, Hbar], [Hbar.T, np.eye((n + p))]]) == cvx.Semidef(((n * (T + 1)) + (n + p)))))
prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
prob.solve(solver=cvx.SCS)
if (prob.status == cvx.OPTIMAL):
logging.debug('successfully solved!')
Phi_x = np.array(Phi_x.value)
Phi_u = np.array(Phi_u.value)
return (True, prob.value, Phi_x, Phi_u)
else:
logging.debug('could not solve: {}'.format(prob.status))
return (False, None, None, None) |
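
A hypothetical invocation of the synthesis routine; the plant estimates and tuning values below are illustrative only, and running it requires numpy, the legacy cvxpy API this file is written against, and the SCS solver.

import numpy as np

n, p = 2, 1
Ahat = 0.5 * np.eye(n)          # nominal state matrix estimate
Bhat = np.ones((n, p))          # nominal input matrix estimate
ok, nominal_cost, Phi_x, Phi_u = sls_synth(np.eye(n), np.eye(p), Ahat, Bhat,
                                           eps_A=0.05, eps_B=0.05, T=10,
                                           gamma=0.98, alpha=0.5)
if ok:
    print(nominal_cost, Phi_x.shape, Phi_u.shape)   # Phi_x: (20, 2), Phi_u: (10, 2)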
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
'\n Solves the common Lyapunov relaxation to the robust \n synthesis problem.\n\n Taken from\n lstd-lqr/blob/master/code/policy_iteration.ipynb\n learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m\n\n '
if (logger is None):
logger = logging.getLogger(__name__)
(d, p) = B.shape
X = cvx.Symmetric(d)
Z = cvx.Variable(p, d)
W_11 = cvx.Symmetric(d)
W_12 = cvx.Variable(d, p)
W_22 = cvx.Symmetric(p)
alph = cvx.Variable()
constraints = []
mat1 = cvx.bmat([[X, X, Z.T], [X, W_11, W_12], [Z, W_12.T, W_22]])
constraints.append((mat1 == cvx.Semidef(((2 * d) + p))))
mat2 = cvx.bmat([[(X - np.eye(d)), ((A * X) + (B * Z)), np.zeros((d, d)), np.zeros((d, p))], [((X * A.T) + (Z.T * B.T)), X, (eps_A * X), (eps_B * Z.T)], [np.zeros((d, d)), (eps_A * X), ((alph * (tau ** 2)) * np.eye(d)), np.zeros((d, p))], [np.zeros((p, d)), (eps_B * Z), np.zeros((p, d)), (((1 - alph) * (tau ** 2)) * np.eye(p))]])
constraints.append((mat2 == cvx.Semidef(((3 * d) + p))))
constraints.append((alph >= 0))
constraints.append((alph <= 1))
objective = cvx.Minimize((cvx.trace((Q * W_11)) + cvx.trace((R * W_22))))
prob = cvx.Problem(objective, constraints)
try:
obj = prob.solve(solver=cvx.MOSEK)
except cvx.SolverError:
logger.warn('SolverError encountered')
return (False, None, None, None)
if (prob.status == cvx.OPTIMAL):
logging.debug('common_lyapunov: found optimal solution')
X_value = np.array(X.value)
P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
K_value = np.array(Z.value).dot(P_value)
return (True, obj, P_value, K_value)
else:
logging.debug('common_lyapunov: could not solve (status={})'.format(prob.status))
return (False, None, None, None) | 1,971,846,414,437,250,600 | Solves the common Lyapunov relaxation to the robust
synthesis problem.
Taken from
lstd-lqr/blob/master/code/policy_iteration.ipynb
learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m | python/sls.py | sls_common_lyapunov | DuttaAbhigyan/robust-adaptive-lqr | python | def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
'\n Solves the common Lyapunov relaxation to the robust \n synthesis problem.\n\n Taken from\n lstd-lqr/blob/master/code/policy_iteration.ipynb\n learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m\n\n '
if (logger is None):
logger = logging.getLogger(__name__)
(d, p) = B.shape
X = cvx.Symmetric(d)
Z = cvx.Variable(p, d)
W_11 = cvx.Symmetric(d)
W_12 = cvx.Variable(d, p)
W_22 = cvx.Symmetric(p)
alph = cvx.Variable()
constraints = []
mat1 = cvx.bmat([[X, X, Z.T], [X, W_11, W_12], [Z, W_12.T, W_22]])
constraints.append((mat1 == cvx.Semidef(((2 * d) + p))))
mat2 = cvx.bmat([[(X - np.eye(d)), ((A * X) + (B * Z)), np.zeros((d, d)), np.zeros((d, p))], [((X * A.T) + (Z.T * B.T)), X, (eps_A * X), (eps_B * Z.T)], [np.zeros((d, d)), (eps_A * X), ((alph * (tau ** 2)) * np.eye(d)), np.zeros((d, p))], [np.zeros((p, d)), (eps_B * Z), np.zeros((p, d)), (((1 - alph) * (tau ** 2)) * np.eye(p))]])
constraints.append((mat2 == cvx.Semidef(((3 * d) + p))))
constraints.append((alph >= 0))
constraints.append((alph <= 1))
objective = cvx.Minimize((cvx.trace((Q * W_11)) + cvx.trace((R * W_22))))
prob = cvx.Problem(objective, constraints)
try:
obj = prob.solve(solver=cvx.MOSEK)
except cvx.SolverError:
logger.warn('SolverError encountered')
return (False, None, None, None)
if (prob.status == cvx.OPTIMAL):
logging.debug('common_lyapunov: found optimal solution')
X_value = np.array(X.value)
P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
K_value = np.array(Z.value).dot(P_value)
return (True, obj, P_value, K_value)
else:
logging.debug('common_lyapunov: could not solve (status={})'.format(prob.status))
return (False, None, None, None) |
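
A matching sketch for the common Lyapunov relaxation, using the same illustrative plant estimates; as in the code above, it needs the legacy cvxpy API plus the MOSEK solver.

import numpy as np

Ahat = 0.5 * np.eye(2)
Bhat = np.ones((2, 1))
feasible, obj, P, K = sls_common_lyapunov(Ahat, Bhat, np.eye(2), np.eye(1),
                                          eps_A=0.05, eps_B=0.05, tau=0.999)
if feasible:
    print(K)   # a static gain u = K x intended to tolerate the stated model error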
@staticmethod
def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
'Compute the proper number of offspring per species (proportional to fitness).'
af_sum = sum(adjusted_fitness)
spawn_amounts = []
for (af, ps) in zip(adjusted_fitness, previous_sizes):
if (af_sum > 0):
s = max(min_species_size, ((af / af_sum) * pop_size))
else:
s = min_species_size
d = ((s - ps) * 0.5)
c = int(round(d))
spawn = ps
if (abs(c) > 0):
spawn += c
elif (d > 0):
spawn += 1
elif (d < 0):
spawn -= 1
spawn_amounts.append(spawn)
total_spawn = sum(spawn_amounts)
norm = (pop_size / total_spawn)
spawn_amounts = [max(min_species_size, int(round((n * norm)))) for n in spawn_amounts]
return spawn_amounts | 5,170,954,133,230,951,000 | Compute the proper number of offspring per species (proportional to fitness). | neat_local/reproduction.py | compute_spawn | Osrip/Novelty_criticality_PyTorch-NEAT | python | @staticmethod
def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
af_sum = sum(adjusted_fitness)
spawn_amounts = []
for (af, ps) in zip(adjusted_fitness, previous_sizes):
if (af_sum > 0):
s = max(min_species_size, ((af / af_sum) * pop_size))
else:
s = min_species_size
d = ((s - ps) * 0.5)
c = int(round(d))
spawn = ps
if (abs(c) > 0):
spawn += c
elif (d > 0):
spawn += 1
elif (d < 0):
spawn -= 1
spawn_amounts.append(spawn)
total_spawn = sum(spawn_amounts)
norm = (pop_size / total_spawn)
spawn_amounts = [max(min_species_size, int(round((n * norm)))) for n in spawn_amounts]
return spawn_amounts |
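
A worked call; the enclosing class name DefaultReproduction is an assumption carried over from upstream neat-python, and since compute_spawn is a @staticmethod no instance or config object is needed.

spawn = DefaultReproduction.compute_spawn(
    adjusted_fitness=[0.75, 0.25],   # species A is three times as fit as species B
    previous_sizes=[10, 10],
    pop_size=20,
    min_species_size=2,
)
print(spawn)   # roughly [12, 8]: each size moves halfway toward its fitness-proportional share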
def reproduce(self, config, species, pop_size, generation):
'\n Handles creation of genomes, either from scratch or by sexual or\n asexual reproduction from parents.\n '
all_fitnesses = []
remaining_species = []
for (stag_sid, stag_s, stagnant) in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend((m.fitness for m in itervalues(stag_s.members)))
remaining_species.append(stag_s)
if (not remaining_species):
species.species = {}
return {}
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
fitness_range = max(1.0, (max_fitness - min_fitness))
for afs in remaining_species:
msf = mean([m.fitness for m in itervalues(afs.members)])
af = ((msf - min_fitness) / fitness_range)
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses)
self.reporters.info('Average adjusted fitness: {:.3f}'.format(avg_adjusted_fitness))
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
min_species_size = max(min_species_size, self.reproduction_config.elitism)
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes, pop_size, min_species_size)
new_population = {}
species.species = {}
for (spawn, s) in zip(spawn_amounts, remaining_species):
spawn = max(spawn, self.reproduction_config.elitism)
assert (spawn > 0)
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
old_members.sort(reverse=True, key=(lambda x: x[1].fitness))
if (self.reproduction_config.elitism > 0):
for (i, m) in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if (spawn <= 0):
continue
repro_cutoff = int(math.ceil((self.reproduction_config.survival_threshold * len(old_members))))
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
while (spawn > 0):
spawn -= 1
(parent1_id, parent1) = random.choice(old_members)
(parent2_id, parent2) = random.choice(old_members)
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population | 4,567,077,984,153,027,000 | Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents. | neat_local/reproduction.py | reproduce | Osrip/Novelty_criticality_PyTorch-NEAT | python | def reproduce(self, config, species, pop_size, generation):
'\n Handles creation of genomes, either from scratch or by sexual or\n asexual reproduction from parents.\n '
all_fitnesses = []
remaining_species = []
for (stag_sid, stag_s, stagnant) in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend((m.fitness for m in itervalues(stag_s.members)))
remaining_species.append(stag_s)
if (not remaining_species):
species.species = {}
return {}
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
fitness_range = max(1.0, (max_fitness - min_fitness))
for afs in remaining_species:
msf = mean([m.fitness for m in itervalues(afs.members)])
af = ((msf - min_fitness) / fitness_range)
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses)
self.reporters.info('Average adjusted fitness: {:.3f}'.format(avg_adjusted_fitness))
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
min_species_size = max(min_species_size, self.reproduction_config.elitism)
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes, pop_size, min_species_size)
new_population = {}
species.species = {}
for (spawn, s) in zip(spawn_amounts, remaining_species):
spawn = max(spawn, self.reproduction_config.elitism)
assert (spawn > 0)
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
old_members.sort(reverse=True, key=(lambda x: x[1].fitness))
if (self.reproduction_config.elitism > 0):
for (i, m) in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if (spawn <= 0):
continue
repro_cutoff = int(math.ceil((self.reproduction_config.survival_threshold * len(old_members))))
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
while (spawn > 0):
spawn -= 1
(parent1_id, parent1) = random.choice(old_members)
(parent2_id, parent2) = random.choice(old_members)
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population |
def get_config_file_for_auto_config(self) -> Optional[Text]:
'Returns config file path for auto-config only if there is a single one.'
return None | 2,301,328,750,709,688,300 | Returns config file path for auto-config only if there is a single one. | rasa/shared/importers/multi_project.py | get_config_file_for_auto_config | mukulbalodi/rasa | python | def get_config_file_for_auto_config(self) -> Optional[Text]:
return None |
def training_paths(self) -> Set[Text]:
'Returns the paths which should be searched for training data.'
training_paths = {i for i in self._imports if ((not self._project_directory) or (self._project_directory not in i))}
if self._project_directory:
training_paths.add(self._project_directory)
return training_paths | 1,007,461,871,642,578,800 | Returns the paths which should be searched for training data. | rasa/shared/importers/multi_project.py | training_paths | mukulbalodi/rasa | python | def training_paths(self) -> Set[Text]:
training_paths = {i for i in self._imports if ((not self._project_directory) or (self._project_directory not in i))}
if self._project_directory:
training_paths.add(self._project_directory)
return training_paths |
def is_imported(self, path: Text) -> bool:
'\n Checks whether a path is imported by a skill.\n Args:\n path: File or directory path which should be checked.\n\n Returns:\n `True` if path is imported by a skill, `False` if not.\n '
absolute_path = os.path.abspath(path)
return (self.no_skills_selected() or self._is_in_project_directory(absolute_path) or self._is_in_additional_paths(absolute_path) or self._is_in_imported_paths(absolute_path)) | -3,549,763,832,062,905,000 | Checks whether a path is imported by a skill.
Args:
path: File or directory path which should be checked.
Returns:
`True` if path is imported by a skill, `False` if not. | rasa/shared/importers/multi_project.py | is_imported | mukulbalodi/rasa | python | def is_imported(self, path: Text) -> bool:
'\n Checks whether a path is imported by a skill.\n Args:\n path: File or directory path which should be checked.\n\n Returns:\n `True` if path is imported by a skill, `False` if not.\n '
absolute_path = os.path.abspath(path)
return (self.no_skills_selected() or self._is_in_project_directory(absolute_path) or self._is_in_additional_paths(absolute_path) or self._is_in_imported_paths(absolute_path)) |
def get_domain(self) -> Domain:
'Retrieves model domain (see parent class for full docstring).'
domains = [Domain.load(path) for path in self._domain_paths]
return reduce((lambda merged, other: merged.merge(other)), domains, Domain.empty()) | 1,445,008,905,624,248,800 | Retrieves model domain (see parent class for full docstring). | rasa/shared/importers/multi_project.py | get_domain | mukulbalodi/rasa | python | def get_domain(self) -> Domain:
domains = [Domain.load(path) for path in self._domain_paths]
return reduce((lambda merged, other: merged.merge(other)), domains, Domain.empty()) |
def get_stories(self, exclusion_percentage: Optional[int]=None) -> StoryGraph:
'Retrieves training stories / rules (see parent class for full docstring).'
return utils.story_graph_from_paths(self._story_paths, self.get_domain(), exclusion_percentage) | -8,375,057,380,450,402,000 | Retrieves training stories / rules (see parent class for full docstring). | rasa/shared/importers/multi_project.py | get_stories | mukulbalodi/rasa | python | def get_stories(self, exclusion_percentage: Optional[int]=None) -> StoryGraph:
return utils.story_graph_from_paths(self._story_paths, self.get_domain(), exclusion_percentage) |
def get_conversation_tests(self) -> StoryGraph:
'Retrieves conversation test stories (see parent class for full docstring).'
return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain()) | -1,269,685,561,277,907,700 | Retrieves conversation test stories (see parent class for full docstring). | rasa/shared/importers/multi_project.py | get_conversation_tests | mukulbalodi/rasa | python | def get_conversation_tests(self) -> StoryGraph:
return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain()) |
def get_config(self) -> Dict:
'Retrieves model config (see parent class for full docstring).'
return self.config | -5,574,769,534,619,931,000 | Retrieves model config (see parent class for full docstring). | rasa/shared/importers/multi_project.py | get_config | mukulbalodi/rasa | python | def get_config(self) -> Dict:
return self.config |
def get_nlu_data(self, language: Optional[Text]='en') -> TrainingData:
'Retrieves NLU training data (see parent class for full docstring).'
return utils.training_data_from_paths(self._nlu_paths, language) | 6,064,696,292,484,832,000 | Retrieves NLU training data (see parent class for full docstring). | rasa/shared/importers/multi_project.py | get_nlu_data | mukulbalodi/rasa | python | def get_nlu_data(self, language: Optional[Text]='en') -> TrainingData:
return utils.training_data_from_paths(self._nlu_paths, language) |
def encode(value, unpicklable=True, make_refs=True, keys=False, max_depth=None, reset=True, backend=None, warn=False, context=None, max_iter=None, use_decimal=False, numeric_keys=False, use_base85=False, fail_safe=None, indent=None, separators=None):
'Return a JSON formatted representation of value, a Python object.\n\n :param unpicklable: If set to False then the output will not contain the\n information necessary to turn the JSON data back into Python objects,\n but a simpler JSON stream is produced.\n :param max_depth: If set to a non-negative integer then jsonpickle will\n not recurse deeper than \'max_depth\' steps into the object. Anything\n deeper than \'max_depth\' is represented using a Python repr() of the\n object.\n :param make_refs: If set to False jsonpickle\'s referencing support is\n disabled. Objects that are id()-identical won\'t be preserved across\n encode()/decode(), but the resulting JSON stream will be conceptually\n simpler. jsonpickle detects cyclical objects and will break the cycle\n by calling repr() instead of recursing when make_refs is set False.\n :param keys: If set to True then jsonpickle will encode non-string\n dictionary keys instead of coercing them into strings via `repr()`.\n This is typically what you want if you need to support Integer or\n objects as dictionary keys.\n :param numeric_keys: Only use this option if the backend supports integer\n dict keys natively. This flag tells jsonpickle to leave numeric keys\n as-is rather than conforming them to json-friendly strings.\n Using ``keys=True`` is the typical solution for integer keys, so only\n use this if you have a specific use case where you want to allow the\n backend to handle serialization of numeric dict keys.\n :param warn: If set to True then jsonpickle will warn when it\n returns None for an object which it cannot pickle\n (e.g. file descriptors).\n :param max_iter: If set to a non-negative integer then jsonpickle will\n consume at most `max_iter` items when pickling iterators.\n :param use_decimal: If set to True jsonpickle will allow Decimal\n instances to pass-through, with the assumption that the simplejson\n backend will be used in `use_decimal` mode. In order to use this mode\n you will need to configure simplejson::\n\n jsonpickle.set_encoder_options(\'simplejson\',\n use_decimal=True, sort_keys=True)\n jsonpickle.set_decoder_options(\'simplejson\',\n use_decimal=True)\n jsonpickle.set_preferred_backend(\'simplejson\')\n\n NOTE: A side-effect of the above settings is that float values will be\n converted to Decimal when converting to json.\n :param use_base85:\n If possible, use base85 to encode binary data. Base85 bloats binary data\n by 1/4 as opposed to base64, which expands it by 1/3. This argument is\n ignored on Python 2 because it doesn\'t support it.\n :param fail_safe: If set to a function exceptions are ignored when pickling\n and if a exception happens the function is called and the return value\n is used as the value for the object that caused the error\n :param indent: When `indent` is a non-negative integer, then JSON array\n elements and object members will be pretty-printed with that indent\n level. An indent level of 0 will only insert newlines. ``None`` is\n the most compact representation. Since the default item separator is\n ``(\', \', \': \')``, the output might include trailing whitespace when\n ``indent`` is specified. You can use ``separators=(\',\', \': \')`` to\n avoid this. This value is passed directly to the active JSON backend\n library and not used by jsonpickle directly.\n :param separators:\n If ``separators`` is an ``(item_separator, dict_separator)`` tuple\n then it will be used instead of the default ``(\', \', \': \')``\n separators. 
``(\',\', \':\')`` is the most compact JSON representation.\n This value is passed directly to the active JSON backend library and\n not used by jsonpickle directly.\n\n >>> encode(\'my string\') == \'"my string"\'\n True\n >>> encode(36) == \'36\'\n True\n >>> encode({\'foo\': True}) == \'{"foo": true}\'\n True\n >>> encode({\'foo\': [1, 2, [3, 4]]}, max_depth=1)\n \'{"foo": "[1, 2, [3, 4]]"}\'\n\n '
backend = (backend or json)
context = (context or Pickler(unpicklable=unpicklable, make_refs=make_refs, keys=keys, backend=backend, max_depth=max_depth, warn=warn, max_iter=max_iter, numeric_keys=numeric_keys, use_decimal=use_decimal, use_base85=use_base85, fail_safe=fail_safe))
return backend.encode(context.flatten(value, reset=reset), indent=indent, separators=separators) | -1,162,178,576,800,201,200 | Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
This is typically what you want if you need to support Integer or
objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
:param fail_safe: If set to a function exceptions are ignored when pickling
and if a exception happens the function is called and the return value
is used as the value for the object that caused the error
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}' | jsonpickle/pickler.py | encode | JHP4911/jsonpickle | python | def encode(value, unpicklable=True, make_refs=True, keys=False, max_depth=None, reset=True, backend=None, warn=False, context=None, max_iter=None, use_decimal=False, numeric_keys=False, use_base85=False, fail_safe=None, indent=None, separators=None):
'Return a JSON formatted representation of value, a Python object.\n\n :param unpicklable: If set to False then the output will not contain the\n information necessary to turn the JSON data back into Python objects,\n but a simpler JSON stream is produced.\n :param max_depth: If set to a non-negative integer then jsonpickle will\n not recurse deeper than \'max_depth\' steps into the object. Anything\n deeper than \'max_depth\' is represented using a Python repr() of the\n object.\n :param make_refs: If set to False jsonpickle\'s referencing support is\n disabled. Objects that are id()-identical won\'t be preserved across\n encode()/decode(), but the resulting JSON stream will be conceptually\n simpler. jsonpickle detects cyclical objects and will break the cycle\n by calling repr() instead of recursing when make_refs is set False.\n :param keys: If set to True then jsonpickle will encode non-string\n dictionary keys instead of coercing them into strings via `repr()`.\n This is typically what you want if you need to support Integer or\n objects as dictionary keys.\n :param numeric_keys: Only use this option if the backend supports integer\n dict keys natively. This flag tells jsonpickle to leave numeric keys\n as-is rather than conforming them to json-friendly strings.\n Using ``keys=True`` is the typical solution for integer keys, so only\n use this if you have a specific use case where you want to allow the\n backend to handle serialization of numeric dict keys.\n :param warn: If set to True then jsonpickle will warn when it\n returns None for an object which it cannot pickle\n (e.g. file descriptors).\n :param max_iter: If set to a non-negative integer then jsonpickle will\n consume at most `max_iter` items when pickling iterators.\n :param use_decimal: If set to True jsonpickle will allow Decimal\n instances to pass-through, with the assumption that the simplejson\n backend will be used in `use_decimal` mode. In order to use this mode\n you will need to configure simplejson::\n\n jsonpickle.set_encoder_options(\'simplejson\',\n use_decimal=True, sort_keys=True)\n jsonpickle.set_decoder_options(\'simplejson\',\n use_decimal=True)\n jsonpickle.set_preferred_backend(\'simplejson\')\n\n NOTE: A side-effect of the above settings is that float values will be\n converted to Decimal when converting to json.\n :param use_base85:\n If possible, use base85 to encode binary data. Base85 bloats binary data\n by 1/4 as opposed to base64, which expands it by 1/3. This argument is\n ignored on Python 2 because it doesn\'t support it.\n :param fail_safe: If set to a function exceptions are ignored when pickling\n and if a exception happens the function is called and the return value\n is used as the value for the object that caused the error\n :param indent: When `indent` is a non-negative integer, then JSON array\n elements and object members will be pretty-printed with that indent\n level. An indent level of 0 will only insert newlines. ``None`` is\n the most compact representation. Since the default item separator is\n ``(\', \', \': \')``, the output might include trailing whitespace when\n ``indent`` is specified. You can use ``separators=(\',\', \': \')`` to\n avoid this. This value is passed directly to the active JSON backend\n library and not used by jsonpickle directly.\n :param separators:\n If ``separators`` is an ``(item_separator, dict_separator)`` tuple\n then it will be used instead of the default ``(\', \', \': \')``\n separators. 
``(\',\', \':\')`` is the most compact JSON representation.\n This value is passed directly to the active JSON backend library and\n not used by jsonpickle directly.\n\n >>> encode(\'my string\') == \'"my string"\'\n True\n >>> encode(36) == \'36\'\n True\n >>> encode({\'foo\': True}) == \'{"foo": true}\'\n True\n >>> encode({\'foo\': [1, 2, [3, 4]]}, max_depth=1)\n \'{"foo": "[1, 2, [3, 4]]"}\'\n\n '
backend = (backend or json)
context = (context or Pickler(unpicklable=unpicklable, make_refs=make_refs, keys=keys, backend=backend, max_depth=max_depth, warn=warn, max_iter=max_iter, numeric_keys=numeric_keys, use_decimal=use_decimal, use_base85=use_base85, fail_safe=fail_safe))
return backend.encode(context.flatten(value, reset=reset), indent=indent, separators=separators) |
def _in_cycle(obj, objs, max_reached, make_refs):
'Detect cyclic structures that would lead to infinite recursion'
return ((max_reached or ((not make_refs) and (id(obj) in objs))) and (not util.is_primitive(obj)) and (not util.is_enum(obj))) | 6,015,507,567,758,734,000 | Detect cyclic structures that would lead to infinite recursion | jsonpickle/pickler.py | _in_cycle | JHP4911/jsonpickle | python | def _in_cycle(obj, objs, max_reached, make_refs):
return ((max_reached or ((not make_refs) and (id(obj) in objs))) and (not util.is_primitive(obj)) and (not util.is_enum(obj))) |
def _mktyperef(obj):
"Return a typeref dictionary\n\n >>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}\n True\n\n "
return {tags.TYPE: util.importable_name(obj)} | 474,281,545,766,140,800 | Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True | jsonpickle/pickler.py | _mktyperef | JHP4911/jsonpickle | python | def _mktyperef(obj):
"Return a typeref dictionary\n\n >>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}\n True\n\n "
return {tags.TYPE: util.importable_name(obj)} |
def _wrap_string_slot(string):
"Converts __slots__ = 'a' into __slots__ = ('a',)"
if isinstance(string, string_types):
return (string,)
return string | -4,815,382,499,170,634,000 | Converts __slots__ = 'a' into __slots__ = ('a',) | jsonpickle/pickler.py | _wrap_string_slot | JHP4911/jsonpickle | python | def _wrap_string_slot(string):
if isinstance(string, string_types):
return (string,)
return string |
def _push(self):
'Steps down one level in the namespace.'
self._depth += 1 | 9,082,772,851,002,886,000 | Steps down one level in the namespace. | jsonpickle/pickler.py | _push | JHP4911/jsonpickle | python | def _push(self):
self._depth += 1 |
def _pop(self, value):
"Step up one level in the namespace and return the value.\n If we're at the root, reset the pickler's state.\n "
self._depth -= 1
if (self._depth == (- 1)):
self.reset()
return value | -313,070,934,508,439,300 | Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state. | jsonpickle/pickler.py | _pop | JHP4911/jsonpickle | python | def _pop(self, value):
"Step up one level in the namespace and return the value.\n If we're at the root, reset the pickler's state.\n "
self._depth -= 1
if (self._depth == (- 1)):
self.reset()
return value |
def _log_ref(self, obj):
'\n Log a reference to an in-memory object.\n Return True if this object is new and was assigned\n a new ID. Otherwise return False.\n '
objid = id(obj)
is_new = (objid not in self._objs)
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new | -3,513,475,387,688,375,300 | Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False. | jsonpickle/pickler.py | _log_ref | JHP4911/jsonpickle | python | def _log_ref(self, obj):
'\n Log a reference to an in-memory object.\n Return True if this object is new and was assigned\n a new ID. Otherwise return False.\n '
objid = id(obj)
is_new = (objid not in self._objs)
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new |
def _mkref(self, obj):
'\n Log a reference to an in-memory object, and return\n if that object should be considered newly logged.\n '
is_new = self._log_ref(obj)
pretend_new = ((not self.unpicklable) or (not self.make_refs))
return (pretend_new or is_new) | 1,706,100,071,685,022,200 | Log a reference to an in-memory object, and return
if that object should be considered newly logged. | jsonpickle/pickler.py | _mkref | JHP4911/jsonpickle | python | def _mkref(self, obj):
'\n Log a reference to an in-memory object, and return\n if that object should be considered newly logged.\n '
is_new = self._log_ref(obj)
pretend_new = ((not self.unpicklable) or (not self.make_refs))
return (pretend_new or is_new) |
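
The effect of this reference bookkeeping is easiest to see from the public API; a small sketch (the exact id numbering in the output is an implementation detail):

import jsonpickle

class Thing(object):
    def __init__(self, name):
        self.name = name

shared = Thing('shared')
print(jsonpickle.encode({'a': shared, 'b': shared}))
# The second occurrence should appear as a {"py/id": ...} back-reference
# rather than a second full copy of the object.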
def flatten(self, obj, reset=True):
"Takes an object and returns a JSON-safe representation of it.\n\n Simply returns any of the basic builtin datatypes\n\n >>> p = Pickler()\n >>> p.flatten('hello world') == 'hello world'\n True\n >>> p.flatten(49)\n 49\n >>> p.flatten(350.0)\n 350.0\n >>> p.flatten(True)\n True\n >>> p.flatten(False)\n False\n >>> r = p.flatten(None)\n >>> r is None\n True\n >>> p.flatten(False)\n False\n >>> p.flatten([1, 2, 3, 4])\n [1, 2, 3, 4]\n >>> p.flatten((1,2,))[tags.TUPLE]\n [1, 2]\n >>> p.flatten({'key': 'value'}) == {'key': 'value'}\n True\n "
if reset:
self.reset()
return self._flatten(obj) | -3,647,423,070,333,986,300 | Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True | jsonpickle/pickler.py | flatten | JHP4911/jsonpickle | python | def flatten(self, obj, reset=True):
"Takes an object and returns a JSON-safe representation of it.\n\n Simply returns any of the basic builtin datatypes\n\n >>> p = Pickler()\n >>> p.flatten('hello world') == 'hello world'\n True\n >>> p.flatten(49)\n 49\n >>> p.flatten(350.0)\n 350.0\n >>> p.flatten(True)\n True\n >>> p.flatten(False)\n False\n >>> r = p.flatten(None)\n >>> r is None\n True\n >>> p.flatten(False)\n False\n >>> p.flatten([1, 2, 3, 4])\n [1, 2, 3, 4]\n >>> p.flatten((1,2,))[tags.TUPLE]\n [1, 2]\n >>> p.flatten({'key': 'value'}) == {'key': 'value'}\n True\n "
if reset:
self.reset()
return self._flatten(obj) |
def _ref_obj_instance(self, obj):
'Reference an existing object or flatten if new'
if self.unpicklable:
if self._mkref(obj):
return self._flatten_obj_instance(obj)
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
return None
self._mkref(obj)
return self._flatten_obj_instance(obj) | 6,787,927,570,877,706,000 | Reference an existing object or flatten if new | jsonpickle/pickler.py | _ref_obj_instance | JHP4911/jsonpickle | python | def _ref_obj_instance(self, obj):
if self.unpicklable:
if self._mkref(obj):
return self._flatten_obj_instance(obj)
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
return None
self._mkref(obj)
return self._flatten_obj_instance(obj) |
def _flatten_file(self, obj):
'\n Special case file objects\n '
assert ((not PY3) and isinstance(obj, types.FileType))
return None | -7,222,675,722,203,146,000 | Special case file objects | jsonpickle/pickler.py | _flatten_file | JHP4911/jsonpickle | python | def _flatten_file(self, obj):
'\n \n '
assert ((not PY3) and isinstance(obj, types.FileType))
return None |
def _flatten_obj_instance(self, obj):
'Recursively flatten an instance and return a json-friendly dict'
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = ((not has_dict) and hasattr(obj, '__slots__'))
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
(has_reduce, has_reduce_ex) = util.has_reduce(obj)
has_getstate = hasattr(obj, '__getstate__')
if has_class:
cls = obj.__class__
else:
cls = type(obj)
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if (handler is not None):
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if (has_reduce and (not has_reduce_ex)):
try:
reduce_val = obj.__reduce__()
except TypeError:
pass
elif has_reduce_ex:
try:
reduce_val = obj.__reduce_ex__(2)
except TypeError:
pass
if (reduce_val and isinstance(reduce_val, string_types)):
try:
varpath = iter(reduce_val.split('.'))
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
return self._flatten(curmod)
except KeyError:
pass
elif reduce_val:
rv_as_list = list(reduce_val)
insufficiency = (5 - len(rv_as_list))
if insufficiency:
rv_as_list += ([None] * insufficiency)
if (getattr(rv_as_list[0], '__name__', '') == '__newobj__'):
rv_as_list[0] = tags.NEWOBJ
(f, args, state, listitems, dictitems) = rv_as_list
if (not (state and hasattr(obj, '__getstate__') and (not hasattr(obj, '__setstate__')) and (not isinstance(obj, dict)))):
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = (len(reduce_args) - 1)
while ((last_index >= 2) and (reduce_args[last_index] is None)):
last_index -= 1
data[tags.REDUCE] = reduce_args[:(last_index + 1)]
return data
if (has_class and (not util.is_module(obj))):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if (has_getnewargs and (not has_getnewargs_ex)):
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
if data:
return data
self._pickle_warning(obj)
return None | -5,174,354,392,125,870,000 | Recursively flatten an instance and return a json-friendly dict | jsonpickle/pickler.py | _flatten_obj_instance | JHP4911/jsonpickle | python | def _flatten_obj_instance(self, obj):
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = ((not has_dict) and hasattr(obj, '__slots__'))
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
(has_reduce, has_reduce_ex) = util.has_reduce(obj)
has_getstate = hasattr(obj, '__getstate__')
if has_class:
cls = obj.__class__
else:
cls = type(obj)
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if (handler is not None):
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if (has_reduce and (not has_reduce_ex)):
try:
reduce_val = obj.__reduce__()
except TypeError:
pass
elif has_reduce_ex:
try:
reduce_val = obj.__reduce_ex__(2)
except TypeError:
pass
if (reduce_val and isinstance(reduce_val, string_types)):
try:
varpath = iter(reduce_val.split('.'))
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
return self._flatten(curmod)
except KeyError:
pass
elif reduce_val:
rv_as_list = list(reduce_val)
insufficiency = (5 - len(rv_as_list))
if insufficiency:
rv_as_list += ([None] * insufficiency)
            if (getattr(rv_as_list[0], '__name__', '') == '__newobj__'):
rv_as_list[0] = tags.NEWOBJ
(f, args, state, listitems, dictitems) = rv_as_list
if (not (state and hasattr(obj, '__getstate__') and (not hasattr(obj, '__setstate__')) and (not isinstance(obj, dict)))):
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = (len(reduce_args) - 1)
while ((last_index >= 2) and (reduce_args[last_index] is None)):
last_index -= 1
data[tags.REDUCE] = reduce_args[:(last_index + 1)]
return data
if (has_class and (not util.is_module(obj))):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if (has_getnewargs and (not has_getnewargs_ex)):
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
if data:
return data
self._pickle_warning(obj)
return None |
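
A sketch of the __getstate__ branch above: the value returned by __getstate__ is stored under the py/state tag so that __setstate__ can rebuild the object on decode.

import jsonpickle

class Counter(object):
    def __init__(self):
        self.count = 3
        self.cache = {'expensive': True}   # deliberately not worth serializing

    def __getstate__(self):
        return {'count': self.count}

    def __setstate__(self, state):
        self.count = state['count']
        self.cache = {}

encoded = jsonpickle.encode(Counter())
restored = jsonpickle.decode(encoded)
print(restored.count, restored.cache)   # 3 {}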
def _flatten_dict_obj(self, obj, data=None):
'Recursively call flatten() and return json-friendly dict'
if (data is None):
data = obj.__class__()
if self.keys:
flatten = self._flatten_string_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
flatten = self._flatten_non_string_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
else:
flatten = self._flatten_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
if (hasattr(obj, 'default_factory') and callable(obj.default_factory)):
factory = obj.default_factory
if util.is_type(factory):
value = _mktyperef(factory)
elif self._mkref(factory):
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
value = self._getref(factory)
data['default_factory'] = value
if (hasattr(obj, '__dict__') and self.unpicklable):
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data | -7,268,695,046,591,207,000 | Recursively call flatten() and return json-friendly dict | jsonpickle/pickler.py | _flatten_dict_obj | JHP4911/jsonpickle | python | def _flatten_dict_obj(self, obj, data=None):
if (data is None):
data = obj.__class__()
if self.keys:
flatten = self._flatten_string_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
flatten = self._flatten_non_string_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
else:
flatten = self._flatten_key_value_pair
for (k, v) in util.items(obj):
flatten(k, v, data)
if (hasattr(obj, 'default_factory') and callable(obj.default_factory)):
factory = obj.default_factory
if util.is_type(factory):
value = _mktyperef(factory)
elif self._mkref(factory):
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
value = self._getref(factory)
data['default_factory'] = value
if (hasattr(obj, '__dict__') and self.unpicklable):
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data |
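
The default_factory branch above is what lets collections.defaultdict round-trip; a sketch, assuming the matching unpickler restores the factory as this code path intends.

import collections
import jsonpickle

dd = collections.defaultdict(list)
dd['a'].append(1)
restored = jsonpickle.decode(jsonpickle.encode(dd))
print(restored['a'], restored.default_factory)   # [1] <class 'list'>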
def _flatten_newstyle_with_slots(self, obj, data):
'Return a json-friendly dict for new-style objects with __slots__.'
allslots = [_wrap_string_slot(getattr(cls, '__slots__', tuple())) for cls in obj.__class__.mro()]
if (not self._flatten_obj_attrs(obj, chain(*allslots), data)):
attrs = [x for x in dir(obj) if ((not x.startswith('__')) and (not x.endswith('__')))]
self._flatten_obj_attrs(obj, attrs, data)
return data | 4,563,973,177,544,642,000 | Return a json-friendly dict for new-style objects with __slots__. | jsonpickle/pickler.py | _flatten_newstyle_with_slots | JHP4911/jsonpickle | python | def _flatten_newstyle_with_slots(self, obj, data):
allslots = [_wrap_string_slot(getattr(cls, '__slots__', tuple())) for cls in obj.__class__.mro()]
if (not self._flatten_obj_attrs(obj, chain(*allslots), data)):
attrs = [x for x in dir(obj) if ((not x.startswith('__')) and (not x.endswith('__')))]
self._flatten_obj_attrs(obj, attrs, data)
return data |
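
Classes that define __slots__ and have no __dict__ go through the helper above; a round-trip sketch:

import jsonpickle

class Point(object):
    __slots__ = ('x', 'y')

    def __init__(self, x, y):
        self.x = x
        self.y = y

p = jsonpickle.decode(jsonpickle.encode(Point(1, 2)))
print(p.x, p.y)   # 1 2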
def _flatten_key_value_pair(self, k, v, data):
'Flatten a key/value pair into the passed-in dictionary.'
if (not util.is_picklable(k, v)):
return data
if (k is None):
k = 'null'
if (self.numeric_keys and isinstance(k, numeric_types)):
pass
elif (not isinstance(k, string_types)):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data | 2,156,137,296,504,074,500 | Flatten a key/value pair into the passed-in dictionary. | jsonpickle/pickler.py | _flatten_key_value_pair | JHP4911/jsonpickle | python | def _flatten_key_value_pair(self, k, v, data):
if (not util.is_picklable(k, v)):
return data
if (k is None):
k = 'null'
if (self.numeric_keys and isinstance(k, numeric_types)):
pass
elif (not isinstance(k, string_types)):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data |
def _flatten_non_string_key_value_pair(self, k, v, data):
'Flatten only non-string key/value pairs'
if (not util.is_picklable(k, v)):
return data
if (self.keys and (not isinstance(k, string_types))):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data | 1,580,031,440,237,875,000 | Flatten only non-string key/value pairs | jsonpickle/pickler.py | _flatten_non_string_key_value_pair | JHP4911/jsonpickle | python | def _flatten_non_string_key_value_pair(self, k, v, data):
if (not util.is_picklable(k, v)):
return data
if (self.keys and (not isinstance(k, string_types))):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data |
def _flatten_string_key_value_pair(self, k, v, data):
'Flatten string key/value pairs only.'
if (not util.is_picklable(k, v)):
return data
if self.keys:
if (not isinstance(k, string_types)):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if (k is None):
k = 'null'
if (self.numeric_keys and isinstance(k, numeric_types)):
pass
elif (not isinstance(k, string_types)):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data | -7,087,045,067,066,155,000 | Flatten string key/value pairs only. | jsonpickle/pickler.py | _flatten_string_key_value_pair | JHP4911/jsonpickle | python | def _flatten_string_key_value_pair(self, k, v, data):
if (not util.is_picklable(k, v)):
return data
if self.keys:
if (not isinstance(k, string_types)):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if (k is None):
k = 'null'
if (self.numeric_keys and isinstance(k, numeric_types)):
pass
elif (not isinstance(k, string_types)):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data |
def _flatten_sequence_obj(self, obj, data):
'Return a json-friendly dict for a sequence subclass.'
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data | 1,514,014,164,244,263,700 | Return a json-friendly dict for a sequence subclass. | jsonpickle/pickler.py | _flatten_sequence_obj | JHP4911/jsonpickle | python | def _flatten_sequence_obj(self, obj, data):
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data |
def schedule_conv2d_hwcn(outs):
'Schedule for conv2d_hwcn and any element-wise operations.\n\n Parameters\n ----------\n outs: Array of Tensor\n The computation graph description of conv2d_hwcn in the format\n of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d_hwcn.\n '
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
sch = tvm.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
'Schedule conv2d_hwcn'
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, 'shared', [B])
WW = sch.cache_read(W, 'shared', [B])
AL = sch.cache_read(AA, 'local', [B])
WL = sch.cache_read(WW, 'local', [B])
if (B.op in sch.outputs):
Out = B
BL = sch.cache_write(Out, 'local')
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope('local')
BL = B
tile = 8
num_thread = 8
block_factor = (tile * num_thread)
step = 8
vthread = 2
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
block_z = tvm.thread_axis('blockIdx.z')
thread_x = tvm.thread_axis((0, num_thread), 'threadIdx.x')
thread_y = tvm.thread_axis((0, num_thread), 'threadIdx.y')
thread_xz = tvm.thread_axis((0, vthread), 'vthread', name='vx')
thread_yz = tvm.thread_axis((0, vthread), 'vthread', name='vy')
(hi, wi, fi, ni) = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
(by, fi) = sch[Out].split(fi, factor=block_factor)
(bx, ni) = sch[Out].split(ni, factor=block_factor)
(tyz, fi) = sch[Out].split(fi, nparts=vthread)
(txz, ni) = sch[Out].split(ni, nparts=vthread)
(ty, fi) = sch[Out].split(fi, nparts=num_thread)
(tx, ni) = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
sch[BL].compute_at(sch[Out], tx)
(yi, xi, fi, ni) = sch[BL].op.axis
(ry, rx, rc) = sch[BL].op.reduce_axis
(rco, rci) = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
(yi, xi, ci, ni) = sch[AA].op.axis
(ty, ci) = sch[AA].split(ci, nparts=num_thread)
(tx, ni) = sch[AA].split(ni, nparts=num_thread)
(_, ni) = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
(yi, xi, ci, fi) = sch[WW].op.axis
(ty, ci) = sch[WW].split(ci, nparts=num_thread)
(tx, fi) = sch[WW].split(fi, nparts=num_thread)
(_, fi) = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
'Traverse operators from computation graph'
if tag.is_broadcast(operator.tag):
if (operator not in sch.outputs):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if (tensor.op.input_tensors and (tensor.op not in scheduled_ops)):
traverse(tensor.op)
elif (operator.tag == 'conv2d_hwcn'):
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if (isinstance(W.op, tvm.tensor.ComputeOp) and ('dilate' in W.op.tag)):
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError(('Unsupported operator: %s' % operator.tag))
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch | 3,848,554,958,064,389,600 | Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn. | topi/python/topi/cuda/conv2d_hwcn.py | schedule_conv2d_hwcn | CortexFoundation/tvm-cvm | python | def schedule_conv2d_hwcn(outs):
'Schedule for conv2d_hwcn and any element-wise operations.\n\n Parameters\n ----------\n outs: Array of Tensor\n The computation graph description of conv2d_hwcn in the format\n of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for conv2d_hwcn.\n '
outs = ([outs] if isinstance(outs, tvm.tensor.Tensor) else outs)
sch = tvm.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
'Schedule conv2d_hwcn'
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, 'shared', [B])
WW = sch.cache_read(W, 'shared', [B])
AL = sch.cache_read(AA, 'local', [B])
WL = sch.cache_read(WW, 'local', [B])
if (B.op in sch.outputs):
Out = B
BL = sch.cache_write(Out, 'local')
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope('local')
BL = B
tile = 8
num_thread = 8
block_factor = (tile * num_thread)
step = 8
vthread = 2
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
block_z = tvm.thread_axis('blockIdx.z')
thread_x = tvm.thread_axis((0, num_thread), 'threadIdx.x')
thread_y = tvm.thread_axis((0, num_thread), 'threadIdx.y')
thread_xz = tvm.thread_axis((0, vthread), 'vthread', name='vx')
thread_yz = tvm.thread_axis((0, vthread), 'vthread', name='vy')
(hi, wi, fi, ni) = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
(by, fi) = sch[Out].split(fi, factor=block_factor)
(bx, ni) = sch[Out].split(ni, factor=block_factor)
(tyz, fi) = sch[Out].split(fi, nparts=vthread)
(txz, ni) = sch[Out].split(ni, nparts=vthread)
(ty, fi) = sch[Out].split(fi, nparts=num_thread)
(tx, ni) = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
sch[BL].compute_at(sch[Out], tx)
(yi, xi, fi, ni) = sch[BL].op.axis
(ry, rx, rc) = sch[BL].op.reduce_axis
(rco, rci) = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
(yi, xi, ci, ni) = sch[AA].op.axis
(ty, ci) = sch[AA].split(ci, nparts=num_thread)
(tx, ni) = sch[AA].split(ni, nparts=num_thread)
(_, ni) = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
(yi, xi, ci, fi) = sch[WW].op.axis
(ty, ci) = sch[WW].split(ci, nparts=num_thread)
(tx, fi) = sch[WW].split(fi, nparts=num_thread)
(_, fi) = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
'Traverse operators from computation graph'
if tag.is_broadcast(operator.tag):
if (operator not in sch.outputs):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if (tensor.op.input_tensors and (tensor.op not in scheduled_ops)):
traverse(tensor.op)
elif (operator.tag == 'conv2d_hwcn'):
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if (isinstance(W.op, tvm.tensor.ComputeOp) and ('dilate' in W.op.tag)):
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError(('Unsupported operator: %s' % operator.tag))
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch |
def schedule(Apad, W, B):
'Schedule conv2d_hwcn'
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, 'shared', [B])
WW = sch.cache_read(W, 'shared', [B])
AL = sch.cache_read(AA, 'local', [B])
WL = sch.cache_read(WW, 'local', [B])
if (B.op in sch.outputs):
Out = B
BL = sch.cache_write(Out, 'local')
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope('local')
BL = B
tile = 8
num_thread = 8
block_factor = (tile * num_thread)
step = 8
vthread = 2
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
block_z = tvm.thread_axis('blockIdx.z')
thread_x = tvm.thread_axis((0, num_thread), 'threadIdx.x')
thread_y = tvm.thread_axis((0, num_thread), 'threadIdx.y')
thread_xz = tvm.thread_axis((0, vthread), 'vthread', name='vx')
thread_yz = tvm.thread_axis((0, vthread), 'vthread', name='vy')
(hi, wi, fi, ni) = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
(by, fi) = sch[Out].split(fi, factor=block_factor)
(bx, ni) = sch[Out].split(ni, factor=block_factor)
(tyz, fi) = sch[Out].split(fi, nparts=vthread)
(txz, ni) = sch[Out].split(ni, nparts=vthread)
(ty, fi) = sch[Out].split(fi, nparts=num_thread)
(tx, ni) = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
sch[BL].compute_at(sch[Out], tx)
(yi, xi, fi, ni) = sch[BL].op.axis
(ry, rx, rc) = sch[BL].op.reduce_axis
(rco, rci) = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
(yi, xi, ci, ni) = sch[AA].op.axis
(ty, ci) = sch[AA].split(ci, nparts=num_thread)
(tx, ni) = sch[AA].split(ni, nparts=num_thread)
(_, ni) = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
(yi, xi, ci, fi) = sch[WW].op.axis
(ty, ci) = sch[WW].split(ci, nparts=num_thread)
(tx, fi) = sch[WW].split(fi, nparts=num_thread)
(_, fi) = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi) | 8,421,041,648,292,286,000 | Schedule conv2d_hwcn | topi/python/topi/cuda/conv2d_hwcn.py | schedule | CortexFoundation/tvm-cvm | python | def schedule(Apad, W, B):
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, 'shared', [B])
WW = sch.cache_read(W, 'shared', [B])
AL = sch.cache_read(AA, 'local', [B])
WL = sch.cache_read(WW, 'local', [B])
if (B.op in sch.outputs):
Out = B
BL = sch.cache_write(Out, 'local')
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope('local')
BL = B
tile = 8
num_thread = 8
block_factor = (tile * num_thread)
step = 8
vthread = 2
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
block_z = tvm.thread_axis('blockIdx.z')
thread_x = tvm.thread_axis((0, num_thread), 'threadIdx.x')
thread_y = tvm.thread_axis((0, num_thread), 'threadIdx.y')
thread_xz = tvm.thread_axis((0, vthread), 'vthread', name='vx')
thread_yz = tvm.thread_axis((0, vthread), 'vthread', name='vy')
(hi, wi, fi, ni) = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
(by, fi) = sch[Out].split(fi, factor=block_factor)
(bx, ni) = sch[Out].split(ni, factor=block_factor)
(tyz, fi) = sch[Out].split(fi, nparts=vthread)
(txz, ni) = sch[Out].split(ni, nparts=vthread)
(ty, fi) = sch[Out].split(fi, nparts=num_thread)
(tx, ni) = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
sch[BL].compute_at(sch[Out], tx)
(yi, xi, fi, ni) = sch[BL].op.axis
(ry, rx, rc) = sch[BL].op.reduce_axis
(rco, rci) = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
(yi, xi, ci, ni) = sch[AA].op.axis
(ty, ci) = sch[AA].split(ci, nparts=num_thread)
(tx, ni) = sch[AA].split(ni, nparts=num_thread)
(_, ni) = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
(yi, xi, ci, fi) = sch[WW].op.axis
(ty, ci) = sch[WW].split(ci, nparts=num_thread)
(tx, fi) = sch[WW].split(fi, nparts=num_thread)
(_, fi) = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi) |
def traverse(operator):
'Traverse operators from computation graph'
if tag.is_broadcast(operator.tag):
if (operator not in sch.outputs):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if (tensor.op.input_tensors and (tensor.op not in scheduled_ops)):
traverse(tensor.op)
elif (operator.tag == 'conv2d_hwcn'):
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if (isinstance(W.op, tvm.tensor.ComputeOp) and ('dilate' in W.op.tag)):
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError(('Unsupported operator: %s' % operator.tag))
scheduled_ops.append(operator) | 1,486,942,041,922,143,200 | Traverse operators from computation graph | topi/python/topi/cuda/conv2d_hwcn.py | traverse | CortexFoundation/tvm-cvm | python | def traverse(operator):
if tag.is_broadcast(operator.tag):
if (operator not in sch.outputs):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if (tensor.op.input_tensors and (tensor.op not in scheduled_ops)):
traverse(tensor.op)
elif (operator.tag == 'conv2d_hwcn'):
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if (isinstance(W.op, tvm.tensor.ComputeOp) and ('dilate' in W.op.tag)):
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError(('Unsupported operator: %s' % operator.tag))
scheduled_ops.append(operator) |
def lookup_class(self, new_class):
'Translate a new class name to the old class name.'
if (new_class in self.classes_dict):
(old_class, class_dict) = self.classes_dict[new_class]
if (old_class is not None):
return old_class
return None | 1,882,551,139,332,402,700 | Translate a new class name to the old class name. | tools/python/dex.py | lookup_class | gdawg/redex | python | def lookup_class(self, new_class):
if (new_class in self.classes_dict):
(old_class, class_dict) = self.classes_dict[new_class]
if (old_class is not None):
return old_class
return None |
def lookup_method(self, new_class, new_method):
'Translate a new class name and a new method into the old class\n name and the old method name.'
if (new_class in self.classes_dict):
(old_class, class_dict) = self.classes_dict[new_class]
if (new_method in class_dict):
return class_dict[new_method]
return None | -4,073,838,217,049,182,000 | Translate a new class name and a new method into the old class
name and the old method name. | tools/python/dex.py | lookup_method | gdawg/redex | python | def lookup_method(self, new_class, new_method):
'Translate a new class name and a new method into the old class\n name and the old method name.'
if (new_class in self.classes_dict):
(old_class, class_dict) = self.classes_dict[new_class]
if (new_method in class_dict):
return class_dict[new_method]
return None |
def get_method_id(self):
'Get the method_id_item for this method.'
if (self.method_id is None):
self.method_id = self.get_dex().get_method_id(self.encoded_method)
return self.method_id | -767,692,397,901,498,100 | Get the method_id_item for this method. | tools/python/dex.py | get_method_id | gdawg/redex | python | def get_method_id(self):
if (self.method_id is None):
self.method_id = self.get_dex().get_method_id(self.encoded_method)
return self.method_id |
def get_method_index(self):
'Get the method index into the method_ids array in the DEX file.'
return self.encoded_method.method_idx | -6,570,096,611,676,412,000 | Get the method index into the method_ids array in the DEX file. | tools/python/dex.py | get_method_index | gdawg/redex | python | def get_method_index(self):
return self.encoded_method.method_idx |
def get_code_offset(self):
'Get the code offset for this method.'
return self.encoded_method.code_off | 3,253,180,441,785,362,000 | Get the code offset for this method. | tools/python/dex.py | get_code_offset | gdawg/redex | python | def get_code_offset(self):
return self.encoded_method.code_off |
def get_code_item_index(self):
'Get the index into the code_items array in the dex file for the\n code for this method, or -1 if there is no code for this method.'
code_item = self.get_code_item()
if code_item:
return self.get_dex().get_code_item_index_from_code_off(code_item.get_offset())
return (- 1) | -6,968,737,353,221,798,000 | Get the index into the code_items array in the dex file for the
code for this method, or -1 if there is no code for this method. | tools/python/dex.py | get_code_item_index | gdawg/redex | python | def get_code_item_index(self):
'Get the index into the code_items array in the dex file for the\n code for this method, or -1 if there is no code for this method.'
code_item = self.get_code_item()
if code_item:
return self.get_dex().get_code_item_index_from_code_off(code_item.get_offset())
return (- 1) |
def get_name_in_file(self):
'Returns the name of the method as it is known in the current DEX\n file (no proguard remapping)'
if (self.name_in_file is None):
self.name_in_file = self.get_dex().get_string(self.get_method_id().name_idx)
return self.name_in_file | 8,711,479,776,300,216,000 | Returns the name of the method as it is known in the current DEX
file (no proguard remapping) | tools/python/dex.py | get_name_in_file | gdawg/redex | python | def get_name_in_file(self):
'Returns the name of the method as it is known in the current DEX\n file (no proguard remapping)'
if (self.name_in_file is None):
self.name_in_file = self.get_dex().get_string(self.get_method_id().name_idx)
return self.name_in_file |
def get_type_index(self):
'Get type ID index (class_idx) for this class.'
return self.class_def.class_idx | -8,132,714,876,377,389,000 | Get type ID index (class_idx) for this class. | tools/python/dex.py | get_type_index | gdawg/redex | python | def get_type_index(self):
return self.class_def.class_idx |
def get_name(self):
"Get the demangled name for a class if we have a proguard file or\n return the mangled name if we don't have a proguard file."
if (self.demangled is None):
mangled = self.get_mangled_name()
if mangled:
self.demangled = self.get_dex().demangle_class_name(mangled)
if (self.demangled is None):
self.demangled = mangled
return self.demangled | 3,952,231,635,240,121,000 | Get the demangled name for a class if we have a proguard file or
return the mangled name if we don't have a proguard file. | tools/python/dex.py | get_name | gdawg/redex | python | def get_name(self):
"Get the demangled name for a class if we have a proguard file or\n return the mangled name if we don't have a proguard file."
if (self.demangled is None):
mangled = self.get_mangled_name()
if mangled:
self.demangled = self.get_dex().demangle_class_name(mangled)
if (self.demangled is None):
self.demangled = mangled
return self.demangled |
def demangle_class_name(self, cls_mangled):
'Given a mangled type name as it would appear in a DEX file like\n "LX/JxK;", return the demangled version if we have a proguard file,\n otherwise return the original class typename'
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_class(cls_demangled)
return None | 2,609,167,484,196,999,700 | Given a mangled type name as it would appear in a DEX file like
"LX/JxK;", return the demangled version if we have a proguard file,
otherwise return the original class typename | tools/python/dex.py | demangle_class_name | gdawg/redex | python | def demangle_class_name(self, cls_mangled):
'Given a mangled type name as it would appear in a DEX file like\n "LX/JxK;", return the demangled version if we have a proguard file,\n otherwise return the original class typename'
if self.proguard:
cls_demangled = demangle_classname(cls_mangled)
if cls_demangled:
return self.proguard.lookup_class(cls_demangled)
return None |
def get_method_id(self, method_ref):
'method_ref can be one of:\n - a encoded_method object\n - integer method index'
method_ids = self.get_method_ids()
if method_ids:
if isinstance(method_ref, encoded_method):
if (method_ref.method_idx < len(method_ids)):
return method_ids[method_ref.method_idx]
elif isinstance(method_ref, numbers.Integral):
if (method_ref < len(method_ids)):
return method_ids[method_ref]
else:
raise ValueError(('invalid method_ref type %s' % type(method_ref)))
return None | -8,282,719,921,504,232,000 | method_ref can be one of:
- a encoded_method object
- integer method index | tools/python/dex.py | get_method_id | gdawg/redex | python | def get_method_id(self, method_ref):
'method_ref can be one of:\n - a encoded_method object\n - integer method index'
method_ids = self.get_method_ids()
if method_ids:
if isinstance(method_ref, encoded_method):
if (method_ref.method_idx < len(method_ids)):
return method_ids[method_ref.method_idx]
elif isinstance(method_ref, numbers.Integral):
if (method_ref < len(method_ids)):
return method_ids[method_ref]
else:
raise ValueError(('invalid method_ref type %s' % type(method_ref)))
return None |
def check_encoding(self, f=sys.stdout):
"Verify that this instruction can't be encoded more efficiently"
return 0 | 3,330,640,632,603,261,000 | Verify that this instruction can't be encoded more efficiently | tools/python/dex.py | check_encoding | gdawg/redex | python | def check_encoding(self, f=sys.stdout):
return 0 |
def new_encoding(self, f=sys.stdout):
'Look for bytes we can save by making new opcodes that are encoded\n as unsigned, or other optimizations'
return 0 | 3,116,464,888,852,868,600 | Look for bytes we can save by making new opcodes that are encoded
as unsigned, or other optimizations | tools/python/dex.py | new_encoding | gdawg/redex | python | def new_encoding(self, f=sys.stdout):
'Look for bytes we can save by making new opcodes that are encoded\n as unsigned, or other optimizations'
return 0 |
def opIsCommutative(self):
'Return True if the operation is commutative'
op = self.get_op()
return ((op == 144) or (op == 146) or (op == 149) or (op == 150) or (op == 151) or (op == 155) or (op == 157) or (op == 160) or (op == 161) or (op == 162) or (op == 166) or (op == 168) or (op == 171) or (op == 173)) | 3,570,157,811,082,132,000 | Return True if the operation is commutative | tools/python/dex.py | opIsCommutative | gdawg/redex | python | def opIsCommutative(self):
op = self.get_op()
return ((op == 144) or (op == 146) or (op == 149) or (op == 150) or (op == 151) or (op == 155) or (op == 157) or (op == 160) or (op == 161) or (op == 162) or (op == 166) or (op == 168) or (op == 171) or (op == 173)) |
def get_op(self):
'Return the 1 byte op field that tells us what instruction this is'
return (self.code_units[0] & 255) | 6,110,940,203,283,449,000 | Return the 1 byte op field that tells us what instruction this is | tools/python/dex.py | get_op | gdawg/redex | python | def get_op(self):
return (self.code_units[0] & 255) |
def get_A(self):
'Get the 4 bit value of A'
return ((self.code_units[0] >> 8) & 15) | 2,964,393,587,828,950,000 | Get the 4 bit value of A | tools/python/dex.py | get_A | gdawg/redex | python | def get_A(self):
return ((self.code_units[0] >> 8) & 15) |
def get_B(self):
'Get the 4 bit value of B'
return ((self.code_units[0] >> 12) & 15) | 413,042,585,781,201,150 | Get the 4 bit value of B | tools/python/dex.py | get_B | gdawg/redex | python | def get_B(self):
return ((self.code_units[0] >> 12) & 15) |
def get_AA(self):
'Get the 8 bit value of AA from the byte next to the Op'
return self.get_uint8_hi(0) | -6,221,053,770,818,469,000 | Get the 8 bit value of AA from the byte next to the Op | tools/python/dex.py | get_AA | gdawg/redex | python | def get_AA(self):
return self.get_uint8_hi(0) |
def __len__(self):
'Overload the length operator to give out the number of code units'
return len(self.code_units) | -5,097,450,298,375,161,000 | Overload the length operator to give out the number of code units | tools/python/dex.py | __len__ | gdawg/redex | python | def __len__(self):
return len(self.code_units) |
def __getitem__(self, key):
'Overload the [] operator to give out code units'
return self.code_units[key] | 4,717,513,790,348,059,000 | Overload the [] operator to give out code units | tools/python/dex.py | __getitem__ | gdawg/redex | python | def __getitem__(self, key):
return self.code_units[key] |
def setUp(self):
'Set up test fixtures, if any.' | 2,045,331,168,806,357,800 | Set up test fixtures, if any. | tests/test_calvestbr.py | setUp | IsaacHiguchi/calvestbr | python | def setUp(self):
|
def tearDown(self):
'Tear down test fixtures, if any.' | -1,468,603,304,420,127,200 | Tear down test fixtures, if any. | tests/test_calvestbr.py | tearDown | IsaacHiguchi/calvestbr | python | def tearDown(self):
|
def test_000_something(self):
'Test something.' | 7,145,567,716,661,586,000 | Test something. | tests/test_calvestbr.py | test_000_something | IsaacHiguchi/calvestbr | python | def test_000_something(self):
|
def download_pdc_id(pdc_id, _download_msg=True):
'Download a PDC dataset by its PDC study id.\n \n Returns:\n pandas.DataFrame: The clinical table for the study id.\n pandas.DataFrame: The quantitative table for the study id.\n '
if _download_msg:
clin_msg = f'Downloading clinical table for {pdc_id}...'
print(clin_msg, end='\r')
clin = _download_study_clin(pdc_id).set_index('case_submitter_id').sort_index()
if _download_msg:
print((' ' * len(clin_msg)), end='\r')
bio_msg = f'Downloading biospecimenPerStudy table for {pdc_id}...'
print(bio_msg, end='\r')
bio = _download_study_biospecimen(pdc_id).set_index('aliquot_submitter_id').sort_index()
if _download_msg:
print((' ' * len(bio_msg)), end='\r')
quant_msg = f'Downloading quantitative table for {pdc_id}...'
print(quant_msg, end='\r')
quant = _download_study_quant(pdc_id)
if _download_msg:
print((' ' * len(quant_msg)), end='\r')
format_msg = f'Formatting tables for {pdc_id}...'
print(format_msg, end='\r')
quant = quant.assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(':', n=1, expand=True)[1]).drop(columns=quant.columns[0]).set_index('aliquot_submitter_id').sort_index()
quant = bio.join(quant, how='inner').reset_index().set_index(['case_submitter_id', 'aliquot_submitter_id']).sort_index()
if _download_msg:
print((' ' * len(format_msg)), end='\r')
return (clin, quant) | 1,277,476,623,142,595,600 | Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id. | cptac/pancan/file_download.py | download_pdc_id | PayneLab/cptac | python | def download_pdc_id(pdc_id, _download_msg=True):
'Download a PDC dataset by its PDC study id.\n \n Returns:\n pandas.DataFrame: The clinical table for the study id.\n pandas.DataFrame: The quantitative table for the study id.\n '
if _download_msg:
clin_msg = f'Downloading clinical table for {pdc_id}...'
print(clin_msg, end='\r')
clin = _download_study_clin(pdc_id).set_index('case_submitter_id').sort_index()
if _download_msg:
print((' ' * len(clin_msg)), end='\r')
bio_msg = f'Downloading biospecimenPerStudy table for {pdc_id}...'
print(bio_msg, end='\r')
bio = _download_study_biospecimen(pdc_id).set_index('aliquot_submitter_id').sort_index()
if _download_msg:
print((' ' * len(bio_msg)), end='\r')
quant_msg = f'Downloading quantitative table for {pdc_id}...'
print(quant_msg, end='\r')
quant = _download_study_quant(pdc_id)
if _download_msg:
print((' ' * len(quant_msg)), end='\r')
format_msg = f'Formatting tables for {pdc_id}...'
print(format_msg, end='\r')
quant = quant.assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(':', n=1, expand=True)[1]).drop(columns=quant.columns[0]).set_index('aliquot_submitter_id').sort_index()
quant = bio.join(quant, how='inner').reset_index().set_index(['case_submitter_id', 'aliquot_submitter_id']).sort_index()
if _download_msg:
print((' ' * len(format_msg)), end='\r')
return (clin, quant) |
def _pdc_download(dataset, version, redownload):
'Download data for the specified cancer type from the PDC.'
dataset = str.lower(dataset)
if (dataset == 'pdcall'):
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
if (not pdc_download(dataset, version, redownload)):
overall_result = False
return overall_result
if (not dataset.startswith('pdc')):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if (dataset not in STUDY_IDS_MAP.keys()):
raise InvalidParameterError(f'''PDC dataset must be one of the following:
{list(STUDY_IDS_MAP.keys())}
You passed '{dataset}'.''')
dataset_ids = STUDY_IDS_MAP[dataset]
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f'data_{dataset}')
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, 'index.txt')
if (not os.path.isfile(index_path)):
redownload = True
else:
with open(index_path, 'r') as index_file:
first_line = index_file.readline()
if first_line.startswith('#0.0'):
redownload = True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f'{dataset}_v1.0')
os.mkdir(data_dir)
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
download_msg = f'Downloading {dataset} {data_type} files...'
print(download_msg, end='\r')
(clin, quant) = download_pdc_id(dataset_ids[data_type], _download_msg=False)
print((' ' * len(download_msg)), end='\r')
save_msg = f'Saving {dataset} {data_type} files...'
print(save_msg, end='\r')
master_clin = master_clin.append(clin)
quant.to_csv(os.path.join(data_dir, f'{data_type}.tsv.gz'), sep='\t')
print((' ' * len(save_msg)), end='\r')
save_msg = f'Saving {dataset} clinical file...'
print(save_msg, end='\r')
master_clin = master_clin.drop_duplicates(keep='first')
master_clin.to_csv(os.path.join(data_dir, 'clinical.tsv.gz'), sep='\t')
index_path = os.path.join(cancer_dir, 'index.txt')
with open(index_path, 'w') as index_file:
index_file.write('#1.0\n')
print((' ' * len(save_msg)), end='\r')
return True | -6,758,916,922,176,530,000 | Download data for the specified cancer type from the PDC. | cptac/pancan/file_download.py | _pdc_download | PayneLab/cptac | python | def _pdc_download(dataset, version, redownload):
dataset = str.lower(dataset)
if (dataset == 'pdcall'):
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
if (not pdc_download(dataset, version, redownload)):
overall_result = False
return overall_result
if (not dataset.startswith('pdc')):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if (dataset not in STUDY_IDS_MAP.keys()):
raise InvalidParameterError(f'''PDC dataset must be one of the following:
{list(STUDY_IDS_MAP.keys())}
You passed '{dataset}'.''')
dataset_ids = STUDY_IDS_MAP[dataset]
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f'data_{dataset}')
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, 'index.txt')
if (not os.path.isfile(index_path)):
redownload = True
else:
with open(index_path, 'r') as index_file:
first_line = index_file.readline()
if first_line.startswith('#0.0'):
redownload = True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f'{dataset}_v1.0')
os.mkdir(data_dir)
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
download_msg = f'Downloading {dataset} {data_type} files...'
print(download_msg, end='\r')
(clin, quant) = download_pdc_id(dataset_ids[data_type], _download_msg=False)
print((' ' * len(download_msg)), end='\r')
save_msg = f'Saving {dataset} {data_type} files...'
print(save_msg, end='\r')
master_clin = master_clin.append(clin)
quant.to_csv(os.path.join(data_dir, f'{data_type}.tsv.gz'), sep='\t')
print((' ' * len(save_msg)), end='\r')
save_msg = f'Saving {dataset} clinical file...'
print(save_msg, end='\r')
master_clin = master_clin.drop_duplicates(keep='first')
master_clin.to_csv(os.path.join(data_dir, 'clinical.tsv.gz'), sep='\t')
index_path = os.path.join(cancer_dir, 'index.txt')
with open(index_path, 'w') as index_file:
index_file.write('#1.0\n')
print((' ' * len(save_msg)), end='\r')
return True |
def _download_study_clin(pdc_study_id):
'Download PDC clinical data for a particular study.'
clinical_query = (('\n query {\n clinicalPerStudy(pdc_study_id: "' + pdc_study_id) + '", acceptDUA: true) {\n age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,\n ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,\n ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,\n case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,\n days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,\n days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,\n diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,\n icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,\n lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,\n overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,\n progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,\n site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,\n tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis\n }\n }\n ')
result_json = _query_pdc(clinical_query)
result_df = pd.DataFrame(result_json['data']['clinicalPerStudy'])
return result_df | -5,002,710,957,430,010,000 | Download PDC clinical data for a particular study. | cptac/pancan/file_download.py | _download_study_clin | PayneLab/cptac | python | def _download_study_clin(pdc_study_id):
clinical_query = (('\n query {\n clinicalPerStudy(pdc_study_id: "' + pdc_study_id) + '", acceptDUA: true) {\n age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,\n ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,\n ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,\n case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,\n days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,\n days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,\n diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,\n icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,\n lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,\n overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,\n progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,\n site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,\n tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis\n }\n }\n ')
result_json = _query_pdc(clinical_query)
result_df = pd.DataFrame(result_json['data']['clinicalPerStudy'])
return result_df |
def _download_study_biospecimen(pdc_study_id):
'Download PDC biospecimen data for a particular study.'
biospecimen_query = (('\n query {\n biospecimenPerStudy(pdc_study_id: "' + pdc_study_id) + '", acceptDUA: true) {\n aliquot_submitter_id\n case_submitter_id\n }\n }\n ')
result_json = _query_pdc(biospecimen_query)
result_df = pd.DataFrame(result_json['data']['biospecimenPerStudy'])
return result_df | 5,144,662,956,955,882,000 | Download PDC biospecimen data for a particular study. | cptac/pancan/file_download.py | _download_study_biospecimen | PayneLab/cptac | python | def _download_study_biospecimen(pdc_study_id):
biospecimen_query = (('\n query {\n biospecimenPerStudy(pdc_study_id: "' + pdc_study_id) + '", acceptDUA: true) {\n aliquot_submitter_id\n case_submitter_id\n }\n }\n ')
result_json = _query_pdc(biospecimen_query)
result_df = pd.DataFrame(result_json['data']['biospecimenPerStudy'])
return result_df |
def _download_study_quant(pdc_study_id):
'Download PDC quantitative data for a particular study.'
proteome_query = (('\n query {\n quantDataMatrix(pdc_study_id: "' + pdc_study_id) + '", data_type: "log2_ratio", acceptDUA: true)\n }\n ')
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json['data']['quantDataMatrix'])
if (result_df.shape[1] != 0):
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f'quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.')
return result_df | 8,883,478,036,025,754,000 | Download PDC quantitative data for a particular study. | cptac/pancan/file_download.py | _download_study_quant | PayneLab/cptac | python | def _download_study_quant(pdc_study_id):
proteome_query = (('\n query {\n quantDataMatrix(pdc_study_id: "' + pdc_study_id) + '", data_type: "log2_ratio", acceptDUA: true)\n }\n ')
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json['data']['quantDataMatrix'])
if (result_df.shape[1] != 0):
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f'quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.')
return result_df |
def _query_pdc(query):
'Send a GraphQL query to the PDC and return the results.'
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status()
except requests.RequestException:
raise NoInternetError('Insufficient internet. Check your internet connection.') from None
return response.json() | 5,755,614,470,036,070,000 | Send a GraphQL query to the PDC and return the results. | cptac/pancan/file_download.py | _query_pdc | PayneLab/cptac | python | def _query_pdc(query):
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status()
except requests.RequestException:
raise NoInternetError('Insufficient internet. Check your internet connection.') from None
return response.json() |
def _check_ids_match(ids_map):
"Check that the ids in the download function's STUDY_IDS_MAP match up."
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data['pdc_study_id']
study_submitter_id = data['study_submitter_id']
query = (('\n query {\n study (pdc_study_id: "' + pdc_study_id) + '" acceptDUA: true) {\n pdc_study_id,\n study_submitter_id\n }\n }\n ')
idres = _query_pdc(query)
server_psi = idres['data']['study'][0]['pdc_study_id']
server_ssi = idres['data']['study'][0]['study_submitter_id']
assert (server_psi == pdc_study_id)
assert (server_ssi == study_submitter_id)
print(f'{server_psi} == {pdc_study_id}')
print(f'{server_ssi} == {study_submitter_id}')
print() | 1,036,462,430,489,785,300 | Check that the ids in the download function's STUDY_IDS_MAP match up. | cptac/pancan/file_download.py | _check_ids_match | PayneLab/cptac | python | def _check_ids_match(ids_map):
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data['pdc_study_id']
study_submitter_id = data['study_submitter_id']
query = (('\n query {\n study (pdc_study_id: "' + pdc_study_id) + '" acceptDUA: true) {\n pdc_study_id,\n study_submitter_id\n }\n }\n ')
idres = _query_pdc(query)
server_psi = idres['data']['study'][0]['pdc_study_id']
server_ssi = idres['data']['study'][0]['study_submitter_id']
assert (server_psi == pdc_study_id)
assert (server_ssi == study_submitter_id)
print(f'{server_psi} == {pdc_study_id}')
print(f'{server_ssi} == {study_submitter_id}')
print() |
def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
'gets details of a content item'
global monitor_new_content, auto_search
images = content.get('images')
for image in images:
image['url'] = '{0}{1}'.format(instance_url, image.get('url'))
monitored = content.get('monitored')
if (monitor_new_content is not None):
monitored = (True if monitor_new_content else False)
payload = {content_id_key: content.get(content_id_key), 'qualityProfileId': int((instance_profile_id or content.get('qualityProfileId'))), 'monitored': monitored, 'rootFolderPath': instance_path, 'images': images}
add_options = content.get('addOptions', {})
search_missing = (True if auto_search else False)
if is_sonarr:
payload['title'] = content.get('title')
payload['titleSlug'] = content.get('titleSlug')
payload['seasons'] = content.get('seasons')
payload['year'] = content.get('year')
payload['tvRageId'] = content.get('tvRageId')
payload['seasonFolder'] = content.get('seasonFolder')
payload['languageProfileId'] = (instance_language_id if instance_language_id else content.get('languageProfileId'))
payload['tags'] = content.get('tags')
payload['seriesType'] = content.get('seriesType')
payload['useSceneNumbering'] = content.get('useSceneNumbering')
payload['addOptions'] = {**add_options, **{'searchForMissingEpisodes': search_missing}}
elif is_radarr:
payload['title'] = content.get('title')
payload['year'] = content.get('year')
payload['tmdbId'] = content.get('tmdbId')
payload['titleSlug'] = content.get('titleSlug')
payload['addOptions'] = {**add_options, **{'searchForMovie': search_missing}}
elif is_lidarr:
payload['artistName'] = content.get('artistName')
payload['albumFolder'] = content.get('albumFolder')
payload['metadataProfileId'] = content.get('metadataProfileId')
payload['addOptions'] = {**add_options, **{'monitored': monitored, 'searchForMissingAlbums': search_missing}}
logger.debug(payload)
return payload | -798,187,230,711,055,700 | gets details of a content item | index.py | get_content_details | markschrik/syncarr | python | def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
global monitor_new_content, auto_search
images = content.get('images')
for image in images:
image['url'] = '{0}{1}'.format(instance_url, image.get('url'))
monitored = content.get('monitored')
if (monitor_new_content is not None):
monitored = (True if monitor_new_content else False)
payload = {content_id_key: content.get(content_id_key), 'qualityProfileId': int((instance_profile_id or content.get('qualityProfileId'))), 'monitored': monitored, 'rootFolderPath': instance_path, 'images': images}
add_options = content.get('addOptions', {})
search_missing = (True if auto_search else False)
if is_sonarr:
payload['title'] = content.get('title')
payload['titleSlug'] = content.get('titleSlug')
payload['seasons'] = content.get('seasons')
payload['year'] = content.get('year')
payload['tvRageId'] = content.get('tvRageId')
payload['seasonFolder'] = content.get('seasonFolder')
payload['languageProfileId'] = (instance_language_id if instance_language_id else content.get('languageProfileId'))
payload['tags'] = content.get('tags')
payload['seriesType'] = content.get('seriesType')
payload['useSceneNumbering'] = content.get('useSceneNumbering')
payload['addOptions'] = {**add_options, **{'searchForMissingEpisodes': search_missing}}
elif is_radarr:
payload['title'] = content.get('title')
payload['year'] = content.get('year')
payload['tmdbId'] = content.get('tmdbId')
payload['titleSlug'] = content.get('titleSlug')
payload['addOptions'] = {**add_options, **{'searchForMovie': search_missing}}
elif is_lidarr:
payload['artistName'] = content.get('artistName')
payload['albumFolder'] = content.get('albumFolder')
payload['metadataProfileId'] = content.get('metadataProfileId')
payload['addOptions'] = {**add_options, **{'monitored': monitored, 'searchForMissingAlbums': search_missing}}
logger.debug(payload)
return payload |
def exit_system():
'we dont want to exit if in docker'
if is_in_docker:
raise Exception
else:
sys.exit(0) | 7,366,073,034,452,480,000 | we dont want to exit if in docker | index.py | exit_system | markschrik/syncarr | python | def exit_system():
if is_in_docker:
raise Exception
else:
sys.exit(0) |
def get_kernelspecs():
'Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs'
end_point = '{}/api/kernelspecs'.format(gateway_host)
logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
resp = requests.get(end_point)
if (not resp.ok):
raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
return resp.json() | -6,687,765,040,639,843,000 | Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs | kernel_image_puller.py | get_kernelspecs | dummys/kernel-image-puller | python | def get_kernelspecs():
end_point = '{}/api/kernelspecs'.format(gateway_host)
logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
resp = requests.get(end_point)
if (not resp.ok):
raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
return resp.json() |
def fetch_image_names():
'Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.\n \n For process-proxy kernelspecs, the image names are contained in the config stanza - which\n resides in the process-proxy stanza located in the metadata.\n '
kspecs = None
try:
kspecs_response = get_kernelspecs()
kspecs = kspecs_response.get('kernelspecs')
except Exception as ex:
logger.error('Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}'.format(ex))
finally:
if (kspecs is None):
return False
images = set()
for key in kspecs.keys():
metadata = kspecs.get(key).get('spec').get('metadata')
if (metadata is not None):
process_proxy = metadata.get('process_proxy')
if (process_proxy is not None):
config = process_proxy.get('config')
if (config is not None):
image_name = config.get('image_name')
if (image_name is not None):
images.add(image_name)
executor_image_name = config.get('executor_image_name')
if (executor_image_name is not None):
images.add(executor_image_name)
for image_name in images:
name_queue.put_nowait(image_name)
return True | -6,910,645,233,095,386,000 | Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.
For process-proxy kernelspecs, the image names are contained in the config stanza - which
resides in the process-proxy stanza located in the metadata. | kernel_image_puller.py | fetch_image_names | dummys/kernel-image-puller | python | def fetch_image_names():
'Fetches the image names by hitting the /api/kernelspecs endpoint of the Gateway.\n \n For process-proxy kernelspecs, the image names are contained in the config stanza - which\n resides in the process-proxy stanza located in the metadata.\n '
kspecs = None
try:
kspecs_response = get_kernelspecs()
kspecs = kspecs_response.get('kernelspecs')
except Exception as ex:
logger.error('Got exception attempting to retrieve kernelspecs - retrying. Exception was: {}'.format(ex))
finally:
if (kspecs is None):
return False
images = set()
for key in kspecs.keys():
metadata = kspecs.get(key).get('spec').get('metadata')
if (metadata is not None):
process_proxy = metadata.get('process_proxy')
if (process_proxy is not None):
config = process_proxy.get('config')
if (config is not None):
image_name = config.get('image_name')
if (image_name is not None):
images.add(image_name)
executor_image_name = config.get('executor_image_name')
if (executor_image_name is not None):
images.add(executor_image_name)
for image_name in images:
name_queue.put_nowait(image_name)
return True |
def pull_image(image_name):
'Pulls the image.\n \n If the policy is `IfNotPresent` the set of pulled image names is\n checked and, if present, the method returns. Otherwise, the pull attempt is made\n and the set of pulled images is updated, when successful.\n \n Since NotFound exceptions are tolerated, we trap for only that exception and let\n the caller handle others.\n '
if (policy == POLICY_IF_NOT_PRESENT):
if (image_name in pulled_images):
logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".format(image_name, policy))
try:
t1 = time.time()
docker_client.images.get(image_name)
t2 = time.time()
logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, (t2 - t1)))
return
except NotFound:
pulled_images.remove(image_name)
logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))
logger.debug("Pulling image '{}'...".format(image_name))
try:
t1 = time.time()
docker_client.images.pull(image_name)
t2 = time.time()
pulled_images.add(image_name)
logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, (t2 - t1)))
except NotFound:
logger.warning("Image '{}' was not found!".format(image_name)) | 1,680,901,528,725,841,700 | Pulls the image.
If the policy is `IfNotPresent` the set of pulled image names is
checked and, if present, the method returns. Otherwise, the pull attempt is made
and the set of pulled images is updated, when successful.
Since NotFound exceptions are tolerated, we trap for only that exception and let
the caller handle others. | kernel_image_puller.py | pull_image | dummys/kernel-image-puller | python | def pull_image(image_name):
'Pulls the image.\n \n If the policy is `IfNotPresent` the set of pulled image names is\n checked and, if present, the method returns. Otherwise, the pull attempt is made\n and the set of pulled images is updated, when successful.\n \n Since NotFound exceptions are tolerated, we trap for only that exception and let\n the caller handle others.\n '
if (policy == POLICY_IF_NOT_PRESENT):
if (image_name in pulled_images):
logger.info("Image '{}' already pulled and policy is '{}'. Checking existence.".format(image_name, policy))
try:
t1 = time.time()
docker_client.images.get(image_name)
t2 = time.time()
logger.debug("Checked existence of image '{}' in {:.3f} secs.".format(image_name, (t2 - t1)))
return
except NotFound:
pulled_images.remove(image_name)
logger.warning("Previously pulled image '{}' was not found - attempting pull...".format(image_name))
logger.debug("Pulling image '{}'...".format(image_name))
try:
t1 = time.time()
docker_client.images.pull(image_name)
t2 = time.time()
pulled_images.add(image_name)
logger.info("Pulled image '{}' in {:.3f} secs.".format(image_name, (t2 - t1)))
except NotFound:
logger.warning("Image '{}' was not found!".format(image_name)) |
def puller():
"Thread-based puller.\n \n Gets image name from the queue and attempts to pull the image. Any issues, except\n for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the\n retries have been exceeded, the queue task is marked as done.\n "
while True:
image_name = name_queue.get()
if (image_name is None):
break
i = 0
while (i < num_retries):
try:
pull_image(image_name)
break
except APIError as ex:
i += 1
if (i < num_retries):
logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".format(i, image_name, ex))
else:
logger.error("Attempt {} to pull image '{}' failed with exception: {}".format(i, image_name, ex))
name_queue.task_done() | 2,182,722,637,967,463,000 | Thread-based puller.
Gets image name from the queue and attempts to pull the image. Any issues, except
for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the
retries have been exceeded, the queue task is marked as done. | kernel_image_puller.py | puller | dummys/kernel-image-puller | python | def puller():
"Thread-based puller.\n \n Gets image name from the queue and attempts to pull the image. Any issues, except\n for NotFound, are retried up to num_retries times. Once the image has been pulled, it's not found or the\n retries have been exceeded, the queue task is marked as done.\n "
while True:
image_name = name_queue.get()
if (image_name is None):
break
i = 0
while (i < num_retries):
try:
pull_image(image_name)
break
except APIError as ex:
i += 1
if (i < num_retries):
logger.warning("Attempt {} to pull image '{}' encountered exception - retrying. Exception was: {}".format(i, image_name, ex))
else:
logger.error("Attempt {} to pull image '{}' failed with exception: {}".format(i, image_name, ex))
name_queue.task_done() |
def flip_cihp(tail_list):
'\n\n :param tail_list: tail_list size is 1 x n_class x h x w\n :return:\n '
tail_list_rev = ([None] * 20)
for xx in range(14):
tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
tail_list_rev[14] = tail_list[15].unsqueeze(0)
tail_list_rev[15] = tail_list[14].unsqueeze(0)
tail_list_rev[16] = tail_list[17].unsqueeze(0)
tail_list_rev[17] = tail_list[16].unsqueeze(0)
tail_list_rev[18] = tail_list[19].unsqueeze(0)
tail_list_rev[19] = tail_list[18].unsqueeze(0)
return torch.cat(tail_list_rev, dim=0) | 6,911,805,198,421,112,000 | :param tail_list: tail_list size is 1 x n_class x h x w
:return: | exp/inference/inference_dir.py | flip_cihp | ericwang0701/Graphonomy | python | def flip_cihp(tail_list):
'\n\n :param tail_list: tail_list size is 1 x n_class x h x w\n :return:\n '
tail_list_rev = ([None] * 20)
for xx in range(14):
tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
tail_list_rev[14] = tail_list[15].unsqueeze(0)
tail_list_rev[15] = tail_list[14].unsqueeze(0)
tail_list_rev[16] = tail_list[17].unsqueeze(0)
tail_list_rev[17] = tail_list[16].unsqueeze(0)
tail_list_rev[18] = tail_list[19].unsqueeze(0)
tail_list_rev[19] = tail_list[18].unsqueeze(0)
return torch.cat(tail_list_rev, dim=0) |
def decode_labels(mask, num_images=1, num_classes=20):
'Decode batch of segmentation masks.\n\n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict (including background).\n\n Returns:\n A batch with num_images RGB images of the same size as the input.\n '
(n, h, w) = mask.shape
assert (n >= num_images), ('Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images))
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[(i, 0)]), len(mask[i])))
pixels = img.load()
for (j_, j) in enumerate(mask[i, :, :]):
for (k_, k) in enumerate(j):
if (k < num_classes):
pixels[(k_, j_)] = label_colours[k]
outputs[i] = np.array(img)
return outputs | -6,977,466,826,084,187,000 | Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input. | exp/inference/inference_dir.py | decode_labels | ericwang0701/Graphonomy | python | def decode_labels(mask, num_images=1, num_classes=20):
'Decode batch of segmentation masks.\n\n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict (including background).\n\n Returns:\n A batch with num_images RGB images of the same size as the input.\n '
(n, h, w) = mask.shape
assert (n >= num_images), ('Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images))
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[(i, 0)]), len(mask[i])))
pixels = img.load()
for (j_, j) in enumerate(mask[i, :, :]):
for (k_, k) in enumerate(j):
if (k < num_classes):
pixels[(k_, j_)] = label_colours[k]
outputs[i] = np.array(img)
return outputs |