text
stringlengths 4
1.02M
| meta
dict |
---|---|
__author__ = 'jawaad'
from distutils.core import setup
setup(name='pytbar',
version='0.300',
description='Library that converts the Japanese Post Office\'s CSV files into a python class. These are '
'thereafter added to a database.',
author='Jawaad Mahmood',
author_email='[email protected]',
url='https://github.com/jmahmood/pytbar/',
license='MIT',
packages=['pytbar'], requires=['redis']) | {
"content_hash": "1e164c7ac92b0a07a25321f4c99a26ad",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 112,
"avg_line_length": 35.38461538461539,
"alnum_prop": 0.6413043478260869,
"repo_name": "jmahmood/pytbar",
"id": "7b79fa050b37bf1067fd1ca82c23662ae43037f6",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5295"
}
],
"symlink_target": ""
} |
import unittest
import numpy
from numpy.testing.utils import assert_almost_equal
from cmepy.statistics import Distribution
from cmepy.measurement import Measurement
class MeasurementTests(unittest.TestCase):
def test_attribute_trickery(self):
m = Measurement()
m.write(0.0, Distribution({(0, ) : 1.0, (1, ) : 0.0}))
m.write(0.5, Distribution({(0, ) : 0.5, (1, ) : 0.5}))
m.write(1.0, Distribution({(0, ) : 0.0, (1, ) : 1.0}))
assert_almost_equal(numpy.array(m.expectation),
[[0], [0.5], [1]])
assert_almost_equal(numpy.array(m.variance),
[0, 0.25, 0])
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(MeasurementTests)
return suite
def main():
unittest.run(MeasurementTests)
if __name__ == '__main__':
main()
| {
"content_hash": "988086a4c77737c7e55c7072aeb80418",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 27.03030303030303,
"alnum_prop": 0.570627802690583,
"repo_name": "hegland/cmepy",
"id": "d822348ece9542534885a5290bb3d715f1031802",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmepy/tests/measurement_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171791"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.compat import samefile
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import display_path
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.vcs import VersionControl, vcs
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
HASH_REGEX = re.compile('[a-fA-F0-9]{40}')
def looks_like_hash(sha):
return bool(HASH_REGEX.match(sha))
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
# Prevent the user's environment variables from interfering with pip:
# https://github.com/pypa/pip/issues/1130
unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
default_arg_rev = 'HEAD'
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def get_base_rev_args(self, rev):
return [rev]
def get_git_version(self):
VERSION_PFX = 'git version '
version = self.run_command(['version'], show_stdout=False)
if version.startswith(VERSION_PFX):
version = version[len(VERSION_PFX):].split()[0]
else:
version = ''
# get first 3 positions of the git version becasue
# on windows it is x.y.z.windows.t, and this parses as
# LegacyVersion which always smaller than a Version.
version = '.'.join(version.split('.')[:3])
return parse_version(version)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
if not location.endswith('/'):
location = location + '/'
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path)
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir.path
)
def get_revision_sha(self, dest, rev):
"""
Return a commit hash for the given revision if it names a remote
branch or tag. Otherwise, return None.
Args:
dest: the repository directory.
rev: the revision name.
"""
# Pass rev to pre-filter the list.
output = self.run_command(['show-ref', rev], cwd=dest,
show_stdout=False, on_returncode='ignore')
refs = {}
for line in output.strip().splitlines():
try:
sha, ref = line.split()
except ValueError:
# Include the offending line to simplify troubleshooting if
# this error ever occurs.
raise ValueError('unexpected show-ref line: {!r}'.format(line))
refs[ref] = sha
branch_ref = 'refs/remotes/origin/{}'.format(rev)
tag_ref = 'refs/tags/{}'.format(rev)
return refs.get(branch_ref) or refs.get(tag_ref)
def check_rev_options(self, dest, rev_options):
"""Check the revision options before checkout.
Returns a new RevOptions object for the SHA1 of the branch or tag
if found.
Args:
rev_options: a RevOptions object.
"""
rev = rev_options.arg_rev
sha = self.get_revision_sha(dest, rev)
if sha is not None:
return rev_options.make_new(sha)
# Do not show a warning for the common case of something that has
# the form of a Git commit hash.
if not looks_like_hash(rev):
logger.warning(
"Did not find branch or tag '%s', assuming revision or ref.",
rev,
)
return rev_options
def is_commit_id_equal(self, dest, name):
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return self.get_revision(dest) == name
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
cmd_args = ['checkout', '-q'] + rev_options.to_args()
self.run_command(cmd_args, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
if self.get_git_version() >= parse_version('1.9.0'):
# fetch tags in addition to everything else
self.run_command(['fetch', '-q', '--tags'], cwd=dest)
else:
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
rev_options = self.check_rev_options(dest, rev_options)
cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()
self.run_command(cmd_args, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
rev_options = self.make_rev_options(rev)
if self.check_destination(dest, url, rev_options):
rev_display = rev_options.to_display()
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(dest, rev_options)
# Only do a checkout if the current commit id doesn't match
# the requested revision.
if not self.is_commit_id_equal(dest, rev_options.rev):
cmd_args = ['fetch', '-q', url] + rev_options.to_args()
self.run_command(cmd_args, cwd=dest)
self.run_command(
['checkout', '-q', 'FETCH_HEAD'],
cwd=dest,
)
#: repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
"""Return URL of the first remote encountered."""
remotes = self.run_command(
['config', '--get-regexp', r'remote\..*\.url'],
show_stdout=False, cwd=location,
)
remotes = remotes.splitlines()
found_remote = remotes[0]
for remote in remotes:
if remote.startswith('remote.origin.url '):
found_remote = remote
break
url = found_remote.split(' ')[1]
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location,
)
return current_rev.strip()
def _get_subdirectory(self, location):
"""Return the relative path of setup.py to the git repo root."""
# find the repo root
git_dir = self.run_command(['rev-parse', '--git-dir'],
show_stdout=False, cwd=location).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
root_dir = os.path.join(git_dir, '..')
# find setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
# relative path of setup.py to repo root
if samefile(root_dir, location):
return None
return os.path.relpath(location, root_dir)
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
subdirectory = self._get_subdirectory(location)
if subdirectory:
req += '&subdirectory=' + subdirectory
return req
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes doesn't
work with a ssh:// scheme (e.g. Github). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def controls_location(cls, location):
if super(Git, cls).controls_location(location):
return True
try:
r = cls().run_command(['rev-parse'],
cwd=location,
show_stdout=False,
on_returncode='ignore')
return not r
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return False
vcs.register(Git)
| {
"content_hash": "5fd78b096bb10afaa0d6e4b09eef8313",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 36.45928338762215,
"alnum_prop": 0.5539176270883588,
"repo_name": "zvezdan/pip",
"id": "3528d8fd9b3b237231e0c2873a5ccb0b431a0e29",
"size": "11193",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pip/_internal/vcs/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "3965"
},
{
"name": "HTML",
"bytes": "2342"
},
{
"name": "Python",
"bytes": "1073344"
},
{
"name": "Shell",
"bytes": "382"
}
],
"symlink_target": ""
} |
import re, os, signal, time, filecmp, stat, fileinput
import yaml
from gppylib.commands.gp import GpStart, chk_local_db_running
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED
from gppylib.operations.backup_utils import pg, escapeDoubleQuoteInSQLString
PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'
GET_APPENDONLY_DATA_TABLE_INFO_SQL ="""SELECT ALL_DATA_TABLES.oid, ALL_DATA_TABLES.schemaname, ALL_DATA_TABLES.tablename, OUTER_PG_CLASS.relname as tupletable FROM(
SELECT ALLTABLES.oid, ALLTABLES.schemaname, ALLTABLES.tablename FROM
(SELECT c.oid, n.nspname AS schemaname, c.relname AS tablename FROM pg_class c, pg_namespace n
WHERE n.oid = c.relnamespace) as ALLTABLES,
(SELECT n.nspname AS schemaname, c.relname AS tablename
FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
EXCEPT
((SELECT x.schemaname, x.partitiontablename FROM
(SELECT distinct schemaname, tablename, partitiontablename, partitionlevel FROM pg_partitions) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel FROM pg_partitions group by (tablename, schemaname))
as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel != Y.maxlevel)
UNION (SELECT distinct schemaname, tablename FROM pg_partitions))) as DATATABLES
WHERE ALLTABLES.schemaname = DATATABLES.schemaname and ALLTABLES.tablename = DATATABLES.tablename AND ALLTABLES.oid not in (select reloid from pg_exttable)
) as ALL_DATA_TABLES, pg_appendonly, pg_class OUTER_PG_CLASS
WHERE ALL_DATA_TABLES.oid = pg_appendonly.relid
AND OUTER_PG_CLASS.oid = pg_appendonly.segrelid
"""
GET_ALL_AO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 'f'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
GET_ALL_CO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 't'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
raise Exception('MASTER_DATA_DIRECTORY is not set')
def execute_sql(dbname, sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, sql)
conn.commit()
def execute_sql_singleton(dbname, sql):
result = None
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, sql)
if result is None:
raise Exception("error running query: %s" % sql)
return result
def has_exception(context):
if not hasattr(context, 'exception'):
return False
if context.exception:
return True
else:
return False
def run_command(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_cmd(command):
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
print 'caught exception %s'%e
result = cmd.get_results()
return (result.rc, result.stdout, result.stderr)
def run_command_remote(context,command, host, source_file, export_mdd):
cmd = Command(name='run command %s'%command,
cmdStr='gpssh -h %s -e \'source %s; %s; %s\''%(host, source_file,export_mdd, command))
cmd.run(validateAfter=True)
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_gpcommand(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='$GPHOME/bin/%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def check_stdout_msg(context, msg):
pat = re.compile(msg)
if not pat.search(context.stdout_message):
err_str = "Expected stdout string '%s' and found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_string_not_present_stdout(context, msg):
pat = re.compile(msg)
if pat.search(context.stdout_message):
err_str = "Did not expect stdout string '%s' but found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_err_msg(context, err_msg):
if not hasattr(context, 'exception'):
raise Exception('An exception was not raised and it was expected')
pat = re.compile(err_msg)
if not pat.search(context.error_message):
err_str = "Expected error string '%s' and found: '%s'" % (err_msg, context.error_message)
raise Exception(err_str)
def check_return_code(context, ret_code):
if context.ret_code != int(ret_code):
emsg = ""
if context.error_message:
emsg += context.error_message
raise Exception("expected return code '%s' does not equal actual return code '%s' %s" % (ret_code, context.ret_code, emsg))
def check_database_is_running(context):
if not 'PGPORT' in os.environ:
raise Exception('PGPORT should be set')
pgport = int(os.environ['PGPORT'])
running_status = chk_local_db_running(master_data_dir, pgport)
gpdb_running = running_status[0] and running_status[1] and running_status[2] and running_status[3]
return gpdb_running
def start_database_if_not_started(context):
if not check_database_is_running(context):
start_database(context)
def start_database(context):
run_gpcommand(context, 'gpstart -a')
if context.exception:
raise context.exception
def stop_database_if_started(context):
if check_database_is_running(context):
stop_database(context)
def stop_database(context):
run_gpcommand(context, 'gpstop -M fast -a')
if context.exception:
raise context.exception
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def getRow(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
result = curs.fetchone()
return result
def check_db_exists(dbname, host=None, port=0, user=None):
LIST_DATABASE_SQL = 'select datname from pg_database'
results = []
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname='template1')) as conn:
curs = dbconn.execSQL(conn, LIST_DATABASE_SQL)
results = curs.fetchall()
for result in results:
if result[0] == dbname:
return True
return False
def create_database_if_not_exists(context, dbname, host=None, port=0, user=None):
if not check_db_exists(dbname, host, port, user):
create_database(context, dbname, host, port, user)
def create_database(context, dbname=None, host=None, port=0, user=None):
LOOPS = 10
if host == None or port == 0 or user == None:
createdb_cmd = 'createdb %s' % dbname
else:
createdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "create database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_command(context, createdb_cmd)
if context.exception:
time.sleep(1)
continue
if check_db_exists(dbname, host, port, user):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception("create database for '%s' failed after %d attempts" % (dbname, LOOPS))
def clear_all_saved_data_verify_files(context):
current_dir = os.getcwd()
data_dir = os.path.join(current_dir, './gppylib/test/data')
cmd = 'rm %s/*' % data_dir
run_command(context, cmd)
def get_table_data_to_file(filename, tablename, dbname):
current_dir = os.getcwd()
filename = os.path.join(current_dir, './gppylib/test/data', filename)
order_sql = """
select string_agg(a, ',')
from (
select generate_series(1,c.relnatts+1) as a
from pg_class as c
inner join pg_namespace as n
on c.relnamespace = n.oid
where (n.nspname || '.' || c.relname = E'%s')
or c.relname = E'%s'
) as q;
""" % (pg.escape_string(tablename), pg.escape_string(tablename))
query = order_sql
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
try:
res = dbconn.execSQLForSingleton(conn, query)
# check if tablename is fully qualified <schema_name>.<table_name>
if '.' in tablename:
schema_name, table_name = tablename.split('.')
data_sql = '''COPY (select gp_segment_id, * from "%s"."%s" order by %s) TO '%s' ''' % (escapeDoubleQuoteInSQLString(schema_name, False),
escapeDoubleQuoteInSQLString(table_name, False), res, filename)
else:
data_sql = '''COPY (select gp_segment_id, * from "%s" order by %s) TO '%s' ''' %(escapeDoubleQuoteInSQLString(tablename, False), res, filename)
query = data_sql
dbconn.execSQL(conn, query)
conn.commit()
except Exception as e:
print "Cannot execute the query '%s' on the connection %s" % (query, str(dbconn.DbURL(dbname=dbname)))
print "Exception: %s" % str(e)
conn.close()
def diff_backup_restore_data(context, backup_file, restore_file):
if not filecmp.cmp(backup_file, restore_file):
raise Exception('%s and %s do not match' % (backup_file, restore_file))
def validate_restore_data(context, tablename, dbname, backedup_table=None):
filename = tablename.strip() + "_restore"
get_table_data_to_file(filename, tablename, dbname)
current_dir = os.getcwd()
if backedup_table != None:
backup_file = os.path.join(current_dir, './gppylib/test/data', backedup_table.strip() + "_backup")
else:
backup_file = os.path.join(current_dir, './gppylib/test/data', tablename.strip() + "_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', tablename.strip() + "_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def validate_restore_data_in_file(context, tablename, dbname, file_name, backedup_table=None):
filename = file_name + "_restore"
get_table_data_to_file(filename, tablename, dbname)
current_dir = os.getcwd()
if backedup_table != None:
backup_file = os.path.join(current_dir, './gppylib/test/data', backedup_table.strip() + "_backup")
else:
backup_file = os.path.join(current_dir, './gppylib/test/data', file_name + "_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', file_name + "_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def validate_db_data(context, dbname, expected_table_count):
tbls = get_table_names(dbname)
if len(tbls) != expected_table_count:
raise Exception("db %s does not have expected number of tables %d != %d" % (dbname, expected_table_count, len(tbls)))
for t in tbls:
name = "%s.%s" % (t[0], t[1])
validate_restore_data(context, name, dbname)
def get_segment_hostnames(context, dbname):
sql = "select distinct(hostname) from gp_segment_configuration where content != -1;"
return getRows(dbname, sql)
def backup_db_data(context, dbname):
tbls = get_table_names(dbname)
for t in tbls:
nm = "%s.%s" % (t[0], t[1])
backup_data(context, nm, dbname)
def backup_data(context, tablename, dbname):
filename = tablename + "_backup"
get_table_data_to_file(filename, tablename, dbname)
def backup_data_to_file(context, tablename, dbname, filename):
filename = filename + "_backup"
get_table_data_to_file(filename, tablename, dbname)
def check_partition_table_exists(context, dbname, schemaname, table_name, table_type=None, part_level=1, part_number=1):
partitions = get_partition_names(schemaname, table_name, dbname, part_level, part_number)
if not partitions:
return False
return check_table_exists(context, dbname, partitions[0][0].strip(), table_type)
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
if '.' in table_name:
schemaname, tablename = table_name.split('.')
SQL = """
select c.oid, c.relkind, c.relstorage, c.reloptions
from pg_class c, pg_namespace n
where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
""" % (pg.escape_string(tablename), pg.escape_string(schemaname))
else:
SQL = """
select oid, relkind, relstorage, reloptions \
from pg_class \
where relname = E'%s'; \
""" % pg.escape_string(table_name)
table_row = None
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
try:
table_row = dbconn.execSQLForSingletonRow(conn, SQL)
except Exception as e:
context.exception = e
return False
if table_type is None:
return True
if table_row[2] == 'a':
original_table_type = 'ao'
elif table_row[2] == 'c':
original_table_type = 'co'
elif table_row[2] == 'h':
original_table_type = 'heap'
elif table_row[2] == 'x':
original_table_type = 'external'
elif table_row[2] == 'v':
original_table_type = 'view'
else:
raise Exception('Unknown table type %s' % table_row[2])
if original_table_type != table_type.strip():
return False
return True
def check_pl_exists(context, dbname, lan_name):
SQL = """select count(*) from pg_language where lanname='%s';""" % lan_name
lan_count = getRows(dbname, SQL)[0][0]
if lan_count == 0:
return False
return True
def check_constraint_exists(context, dbname, conname):
SQL = """select count(*) from pg_constraint where conname='%s';""" % conname
con_count = getRows(dbname, SQL)[0][0]
if con_count == 0:
return False
return True
def drop_external_table_if_exists(context, table_name, dbname):
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external'):
drop_external_table(context, table_name=table_name, dbname=dbname)
def drop_table_if_exists(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table if exists %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
def drop_external_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop external table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external', host=host, port=port, user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def drop_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, host=host, port=port, user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def check_schema_exists(context, schema_name, dbname):
schema_check_sql = "select * from pg_namespace where nspname='%s';" % schema_name
if len(getRows(dbname, schema_check_sql)) < 1:
return False
return True
def drop_schema_if_exists(context, schema_name, dbname):
if check_schema_exists(context, schema_name, dbname):
drop_schema(context, schema_name, dbname)
def drop_schema(context, schema_name, dbname):
SQL = 'drop schema %s cascade' % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_schema_exists(context, schema_name, dbname):
raise Exception('Unable to successfully drop the schema %s' % schema_name)
def validate_table_data_on_segments(context, tablename, dbname):
seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % tablename
rows = getRows(dbname, seg_data_sql)
for row in rows:
if row[1] == '0' :
raise Exception('Data not present in segment %s' % row[0])
def get_table_names(dbname):
sql = """
SELECT n.nspname AS schemaname, c.relname AS tablename\
FROM pg_class c\
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace\
WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
AND n.nspname NOT LIKE 'pg_temp_%'
"""
return getRows(dbname, sql)
def get_partition_tablenames(tablename, dbname, part_level = 1):
child_part_sql = "select partitiontablename from pg_partitions where tablename='%s' and partitionlevel=%s;" % (tablename, part_level)
rows = getRows(dbname, child_part_sql)
return rows
def get_partition_names(schemaname, tablename, dbname, part_level, part_number):
part_num_sql = """select partitionschemaname || '.' || partitiontablename from pg_partitions
where schemaname='%s' and tablename='%s'
and partitionlevel=%s and partitionposition=%s;""" % (schemaname, tablename, part_level, part_number)
rows = getRows(dbname, part_num_sql)
return rows
def validate_part_table_data_on_segments(context, tablename, part_level, dbname):
rows = get_partition_tablenames(tablename, dbname, part_level)
for part_tablename in rows :
seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % part_tablename[0]
rows = getRows(dbname, seg_data_sql)
for row in rows:
if row[1] == '0' :
raise Exception('Data not present in segment %s' % row[0])
def validate_mixed_partition_storage_types(context, tablename, dbname):
partition_names = get_partition_tablenames(tablename, dbname, part_level = 1)
for position, partname in enumerate(partition_names):
if position in(0, 2, 5, 7):
storage_type = 'c'
elif position in(1, 3, 6, 8):
storage_type = 'a'
else:
storage_type = 'h'
for part in partname:
validate_storage_type(context, part, storage_type, dbname)
def validate_storage_type(context, partname, storage_type, dbname):
storage_type_sql = "select oid::regclass, relstorage from pg_class where oid = '%s'::regclass;" % (partname)
rows = getRows(dbname, storage_type_sql)
for row in rows:
if row[1].strip() != storage_type.strip():
raise Exception("The storage type of the partition %s is not as expected %s "% (row[1], storage_type))
def create_mixed_storage_partition(context, tablename, dbname):
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table %s (%s) Distributed randomly \
Partition by list(Column2) \
Subpartition by range(Column3) Subpartition Template ( \
subpartition s_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
subpartition s_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
subpartition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
subpartition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
subpartition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
(partition p1 values('backup') , partition p2 values('restore')) \
;" % (tablename, table_definition)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
conn.commit()
populate_partition(tablename, '2010-01-01', dbname, 0)
def create_external_partition(context, tablename, dbname, port, filename):
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table %s (%s) Distributed randomly \
Partition by range(Column3) ( \
partition p_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
partition p_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
partition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
partition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
partition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
;" % (tablename, table_definition)
master_hostname = get_master_hostname();
create_ext_table_str = "Create readable external table %s_ret (%s) \
location ('gpfdist://%s:%s/%s') \
format 'csv' encoding 'utf-8' \
log errors segment reject limit 1000 \
;" % (tablename, table_definition, master_hostname[0][0].strip(), port, filename)
alter_table_str = "Alter table %s exchange partition p_2 \
with table %s_ret without validation \
;" % (tablename, tablename)
drop_table_str = "Drop table %s_ret;" % (tablename)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
dbconn.execSQL(conn, create_ext_table_str)
dbconn.execSQL(conn, alter_table_str)
dbconn.execSQL(conn, drop_table_str)
conn.commit()
populate_partition(tablename, '2010-01-01', dbname, 0, 100)
def modify_partition_data(context, tablename, dbname, partitionnum):
# ONLY works for partition 1 to 3
if partitionnum == 1:
year = '2010'
elif partitionnum == 2:
year = '2011'
elif partitionnum == 3:
year = '2012'
else:
raise Exception("BAD PARAM to modify_partition_data %s" % partitionnum)
cmdStr = """ echo "90,backup,%s-12-30" | psql -d %s -c "copy %s from stdin delimiter ',';" """ % (year, dbname, tablename)
for i in range(10):
cmd = Command(name='insert data into %s' % tablename, cmdStr=cmdStr)
cmd.run(validateAfter=True)
def modify_data(context, tablename, dbname):
cmdStr = 'psql -d %s -c "copy %s to stdout;" | psql -d %s -c "copy %s from stdin;"' % (dbname, tablename, dbname, tablename)
cmd = Command(name='insert data into %s' % tablename, cmdStr=cmdStr)
cmd.run(validateAfter=True)
def add_partition(context, partitionnum, tablename, dbname):
alter_table_str = "alter table %s add default partition p%s; insert into %s select i+%d, 'update', i + date '%s' from generate_series(0,1094) as i" \
% (tablename, partitionnum, tablename, int(partitionnum), PARTITION_START_DATE)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, alter_table_str)
conn.commit()
def drop_partition(context, partitionnum, tablename, dbname):
alter_table_str = "alter table %s drop partition p%s;" % (tablename, partitionnum)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, alter_table_str)
conn.commit()
def create_partition(context, tablename, storage_type, dbname, compression_type=None, partition=True, rowcount=1094, with_data=True, host=None, port=0, user=None):
interval = '1 year'
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table " + tablename + "(" + table_definition + ")"
storage_type_dict = {'ao':'row', 'co':'column'}
part_table = " Distributed Randomly Partition by list(Column2) \
Subpartition by range(Column3) Subpartition Template \
(start (date '%s') end (date '%s') every (interval '%s')) \
(Partition p1 values('backup') , Partition p2 values('restore')) " \
%(PARTITION_START_DATE, PARTITION_END_DATE, interval)
if storage_type == "heap":
create_table_str = create_table_str
if partition:
create_table_str = create_table_str + part_table
elif storage_type == "ao" or storage_type == "co":
create_table_str = create_table_str + " WITH(appendonly = true, orientation = %s) " % storage_type_dict[storage_type]
if compression_type is not None:
create_table_str = create_table_str[:-2] + ", compresstype = " + compression_type + ") "
if partition:
create_table_str = create_table_str + part_table
create_table_str = create_table_str + ";"
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
conn.commit()
if with_data:
populate_partition(tablename, PARTITION_START_DATE, dbname, 0, rowcount, host, port, user)
# same data size as populate partition, but different values
def populate_partition_diff_data_same_eof(tablename, dbname):
populate_partition(tablename, PARTITION_START_DATE, dbname, 1)
def populate_partition_same_data(tablename, dbname):
populate_partition(tablename, PARTITION_START_DATE, dbname, 0)
def populate_partition(tablename, start_date, dbname, data_offset, rowcount=1094, host=None, port=0, user=None):
insert_sql_str = "insert into %s select i+%d, 'backup', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
insert_sql_str += "; insert into %s select i+%d, 'restore', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, insert_sql_str)
conn.commit()
def create_indexes(context, table_name, indexname, dbname):
btree_index_sql = "create index btree_%s on %s using btree(column1);" % (indexname, table_name)
bitmap_index_sql = "create index bitmap_%s on %s using bitmap(column3);" % (indexname, table_name)
index_sql = btree_index_sql + bitmap_index_sql
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, index_sql)
conn.commit()
validate_index(context, table_name, dbname)
def validate_index(context, table_name, dbname):
index_sql = "select count(indexrelid::regclass) from pg_index, pg_class where indrelid = '%s'::regclass group by indexrelid;" % table_name
rows = getRows(dbname, index_sql)
if len(rows) != 2:
raise Exception('Index creation was not successful. Expected 2 rows does not match %d rows' % result)
def create_schema(context, schema_name, dbname):
if not check_schema_exists(context, schema_name, dbname):
schema_sql = "create schema %s" % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, schema_sql)
conn.commit()
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
CREATE_TABLE_SQL = None
NROW = 1000
table_type = table_type.upper()
if table_type == 'AO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (table_name, NROW)
elif table_type == 'CO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (table_name, NROW)
elif table_type == 'HEAP':
CREATE_TABLE_SQL = 'create table %s as select generate_series(1, %d) as c1' % (table_name, NROW)
if CREATE_TABLE_SQL is None:
raise Exception('Invalid table type specified')
SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, CREATE_TABLE_SQL)
conn.commit()
result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
if result != NROW:
raise Exception('Integer table creation was not successful. Expected %d does not match %d' %(NROW, result))
def drop_database(context, dbname, host=None, port=0, user=None):
LOOPS = 10
if host == None or port == 0 or user == None:
dropdb_cmd = 'dropdb %s' % dbname
else:
dropdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "drop database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_gpcommand(context, dropdb_cmd)
if context.exception:
time.sleep(1)
continue
if not check_db_exists(dbname):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception('db exists after dropping: %s' % dbname)
def drop_database_if_exists(context, dbname=None, host=None, port=0, user=None):
if check_db_exists(dbname, host=host, port=port, user=user):
drop_database(context, dbname, host=host, port=port, user=user)
def run_on_all_segs(context, dbname, query):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
with dbconn.connect(dbconn.DbURL(dbname=dbname, hostname=seg.getSegmentHostName(), port=seg.getSegmentPort()), utility=True) as conn:
dbconn.execSQL(conn, query)
conn.commit()
def get_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='ifconfig nic', cmdStr='sudo /sbin/ifconfig %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
return 'UP' in cmd.get_results().stdout
def bring_nic_down(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring down nic', cmdStr='sudo /sbin/ifdown %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if get_nic_up(hostname, nic):
raise Exception('Unable to bring down nic %s on host %s' % (nic, hostname))
def bring_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring up nic', cmdStr='sudo /sbin/ifup %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if not get_nic_up(hostname, nic):
raise Exception('Unable to bring up nic %s on host %s' % (nic, hostname))
def are_segments_synchronized():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.mode != MODE_SYNCHRONIZED:
return False
return True
def get_distribution_policy(dbname):
filename = dbname.strip() + "_dist_policy_backup"
get_dist_policy_to_file(filename, dbname)
def get_dist_policy_to_file(filename, dbname):
dist_policy_sql = " \
SELECT \
c.relname as tablename, p.attrnums as distribution_policy \
FROM \
pg_class c \
INNER JOIN \
gp_distribution_policy p \
ON (c.relfilenode = p.localoid) \
AND \
c.relstorage != 'x' \
ORDER BY c.relname"
current_dir = os.getcwd()
filename = os.path.join(current_dir, './gppylib/test/data', filename)
data_sql = "COPY (%s) TO '%s'" %(dist_policy_sql, filename)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, data_sql)
conn.commit()
def validate_distribution_policy(context, dbname):
filename = dbname.strip() + "_dist_policy_restore"
get_dist_policy_to_file(filename, dbname)
current_dir = os.getcwd()
backup_file = os.path.join(current_dir, './gppylib/test/data', dbname.strip() + "_dist_policy_backup")
restore_file = os.path.join(current_dir, './gppylib/test/data', dbname.strip() + "_dist_policy_restore")
diff_backup_restore_data(context, backup_file, restore_file)
def check_row_count(tablename, dbname, nrows):
NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
def check_empty_table(tablename, dbname):
check_row_count(tablename, dbname, 0)
def match_table_select(context, src_tablename, src_dbname, dest_tablename, dest_dbname, orderby=None, options=''):
if orderby != None :
dest_tbl_qry = 'psql -d %s -c \'select * from %s order by %s\' %s' % (dest_dbname, dest_tablename, orderby, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s order by %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, orderby, options)
else:
dest_tbl_qry = 'psql -d %s -c \'select * from %s\' %s' % (dest_dbname, dest_tablename, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, options)
(_, dest_content, _) = run_cmd(dest_tbl_qry)
(_, src_content, _) = run_cmd(src_tbl_qry)
if src_content != dest_content:
raise Exception('''table %s in database %s of source system does not match rows with table %s in database %s of destination system.\n
destination table content:\n%s\n
source table content:\n%s\n''' % (
src_tablename,src_dbname, dest_tablename, dest_dbname, dest_content, src_content))
def get_master_hostname(dbname='template1'):
master_hostname_sql = "select distinct hostname from gp_segment_configuration where content=-1 and role='p'"
return getRows(dbname, master_hostname_sql)
def get_hosts_and_datadirs(dbname='template1'):
get_hosts_and_datadirs_sql = "select hostname, fselocation from gp_segment_configuration, pg_filespace_entry where fsedbid = dbid and role='p';"
return getRows(dbname, get_hosts_and_datadirs_sql)
def get_hosts(dbname='template1'):
get_hosts_sql = "select distinct hostname from gp_segment_configuration where role='p';"
return getRows(dbname, get_hosts_sql)
def get_backup_dirs_for_hosts(dbname='template1'):
get_backup_dir_sql = "select hostname,f.fselocation from pg_filespace_entry f inner join gp_segment_configuration g on f.fsedbid=g.dbid and g.role='p'"
results = getRows(dbname, get_backup_dir_sql)
dir_map = {}
for res in results:
host,dir = res
dir_map.setdefault(host,[]).append(dir)
return dir_map
def cleanup_backup_files(context, dbname, location=None):
dir_map = get_backup_dirs_for_hosts(dbname)
for host in dir_map:
if location:
cmd_str = "ssh %s 'DIR=%s;if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi'"
cmd = cmd_str % (host, location)
else:
cmd_str = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi; done'"
cmd = cmd_str % (host, " ".join(dir_map[host]))
run_command(context, cmd)
if context.exception:
raise context.exception
def cleanup_report_files(context, master_data_dir):
if not master_data_dir:
raise Exception("master_data_dir not specified in cleanup_report_files")
if master_data_dir.strip() == '/':
raise Exception("Can't call cleanup_report_files on root directory")
file_pattern = "gp_*.rpt"
cleanup_cmd = "rm -f %s/%s" % (master_data_dir, file_pattern)
run_command(context, cleanup_cmd)
if context.exception:
raise context.exception
def truncate_table(dbname, tablename):
TRUNCATE_SQL = 'TRUNCATE %s' % tablename
execute_sql(dbname, TRUNCATE_SQL)
def verify_truncate_in_pg_stat_last_operation(context, dbname, oid):
VERIFY_TRUNCATE_SQL = """SELECT *
FROM pg_stat_last_operation
WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
row = dbconn.execSQLForSingletonRow(conn, VERIFY_TRUNCATE_SQL)
if len(row) != 7:
raise Exception('Invalid number of colums %d' % len(row))
if row[2] != 'TRUNCATE':
raise Exception('Actiontype not expected TRUNCATE "%s"' % row[2])
if row[5]:
raise Exception('Subtype for TRUNCATE operation is not empty %s' % row[5])
def verify_truncate_not_in_pg_stat_last_operation(context, dbname, oid):
VERIFY_TRUNCATE_SQL = """SELECT count(*)
FROM pg_stat_last_operation
WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
thecount = dbconn.execSQLForSingleton(conn, VERIFY_TRUNCATE_SQL)
if thecount != 0:
raise Exception("Found %s rows from query '%s' should be 0" % (thecount, VERIFY_TRUNCATE_SQL))
def get_table_oid(context, dbname, schema, tablename):
OID_SQL = """SELECT c.oid
FROM pg_class c, pg_namespace n
WHERE c.relnamespace = n.oid AND c.relname = '%s' AND n.nspname = '%s'""" % (tablename, schema)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
oid = dbconn.execSQLForSingleton(conn, OID_SQL)
return oid
def insert_numbers(dbname, tablename, lownum, highnum):
sql = "insert into %s select generate_series(%s, %s)" % (tablename, lownum, highnum)
execute_sql(dbname, sql)
def verify_integer_tuple_counts(context, filename):
with open(filename, 'r') as fp:
for line in fp:
tupcount = line.split(',')[-1].strip()
if re.match("^\d+?\.\d+?$", tupcount) is not None:
raise Exception('Expected an integer tuplecount in file %s found float' % filename)
def create_fake_pg_aoseg_table(context, table, dbname):
sql = """CREATE TABLE %s(segno int,
eof double precision,
tupcount double precision,
modcount bigint,
varblockcount double precision,
eofuncompressed double precision)""" % table
execute_sql(dbname, sql)
def insert_row(context, row_values, table, dbname):
sql = """INSERT INTO %s values(%s)""" % (table, row_values)
execute_sql(dbname, sql)
def copy_file_to_all_db_hosts(context, filename):
hosts_set = set()
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for seg in gparray.getDbList():
if seg.isSegmentPrimary():
hosts_set.add(seg.getSegmentAddress())
hostfile = '/tmp/copy_host_file.behave'
with open(hostfile, 'w') as fd:
for h in hosts_set:
fd.write('%s\n' % h)
cmd = 'gpscp -f %s %s =:%s' % (hostfile, filename, filename)
run_command(context, cmd)
if context.exception:
raise Exception("FAIL: '%s' '%s'" % (cmd, context.exception.__str__()))
os.remove(hostfile)
def create_large_num_partitions(table_type, table_name, db_name, num_partitions=None):
if table_type == "ao":
condition = "with(appendonly=true)"
elif table_type == "co":
condition = "with(appendonly=true, orientation=column)"
else:
condition = ""
if num_partitions is None:
create_large_partitions_sql = """
create table %s (column1 int, column2 int) %s partition by range(column1) subpartition by range(column2) subpartition template(start(1) end(75) every(1)) (start(1) end(75) every(1))
""" % (table_name, condition)
else:
create_large_partitions_sql = """
create table %s (column1 int, column2 int) %s partition by range(column1) (start(1) end(%d) every(1))
""" % (table_name, condition, num_partitions)
execute_sql(db_name, create_large_partitions_sql)
if '.' in table_name:
schema, table = table_name.split('.')
verify_table_exists_sql = """select count(*) from pg_class c, pg_namespace n
where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
""" % (table, schema)
else:
verify_table_exists_sql = """select count(*) from pg_class where relname = E'%s'""" % table_name
num_rows = getRows(db_name, verify_table_exists_sql)[0][0]
if num_rows != 1:
raise Exception('Creation of table "%s:%s" failed. Num rows in pg_class = %s' % (db_name, table_name, num_rows))
def validate_num_restored_tables(context, num_tables, dbname):
tbls = get_table_names(dbname)
count_query = """select count(*) from %s"""
num_validate_tables = 0
for t in tbls:
name = '%s.%s' % (t[0], t[1])
count = getRows(dbname, count_query % name)[0][0]
if count == 0:
continue
else:
validate_restore_data(context, name, dbname)
num_validate_tables += 1
if num_validate_tables != int(num_tables.strip()):
raise Exception('Invalid number of tables were restored. Expected "%s", Actual "%s"' % (num_tables, num_validate_tables))
def get_partition_list(partition_type, dbname):
if partition_type == 'ao':
sql = GET_ALL_AO_DATATABLES_SQL
elif partition_type == 'co':
sql = GET_ALL_CO_DATATABLES_SQL
partition_list = getRows(dbname, sql)
for line in partition_list:
if len(line) != 4:
raise Exception('Invalid results from query to get all AO tables: [%s]' % (','.join(line)))
return partition_list
def verify_stats(dbname, partition_info):
for (oid, schemaname, partition_name, tupletable) in partition_info:
tuple_count_sql = "select to_char(sum(tupcount::bigint), '999999999999999999999') from pg_aoseg.%s" % tupletable
tuple_count = getRows(dbname, tuple_count_sql)[0][0]
if tuple_count:
tuple_count = tuple_count.strip()
else:
tuple_count = '0'
validate_tuple_count(dbname, schemaname, partition_name, tuple_count)
def validate_tuple_count(dbname, schemaname, partition_name, tuple_count):
sql = 'select count(*) from %s.%s' % (schemaname, partition_name)
row_count = getRows(dbname, sql)[0][0]
if int(row_count) != int(tuple_count):
raise Exception('Stats for the table %s.%s does not match. Stat count "%s" does not match the actual tuple count "%s"' % (schemaname, partition_name, tuple_count, row_count))
def validate_aoco_stats(context, dbname, table, expected_tupcount):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
schema, table = table.split('.')
sql = "SELECT relname FROM pg_class \
WHERE oid in (SELECT segrelid FROM pg_appendonly \
WHERE relid in (SELECT oid FROM pg_class \
WHERE relname = '%s' AND relnamespace = (SELECT oid FROM pg_namespace \
WHERE nspname = '%s')))" % (table, schema)
tname = dbconn.execSQLForSingleton(conn, sql)
sql = "select sum(tupcount) from pg_aoseg.%s" % tname.strip()
rows = getRows(dbname, sql)
tupcount = int(rows[0][0])
if tupcount != int(expected_tupcount):
raise Exception("%s has stats of %d rows in %s table and should have %s" % (table, tupcount, tname, expected_tupcount))
def validate_no_aoco_stats(context, dbname, table):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
sql = "select relname from pg_class where oid in (select segrelid from pg_appendonly where relid in (select oid from pg_class where relname = '%s'))" % table
tname = dbconn.execSQLForSingleton(conn, sql)
sql = "select tupcount from pg_aoseg.%s" % tname.strip()
rows = getRows(dbname, sql)
if len(rows) != 0:
raise Exception("%s has stats of %d rows in %s table and should be 0" % (table, int(rows[0][0]), tname))
def get_all_hostnames_as_list(context, dbname):
hosts = []
segs = get_segment_hostnames(context, dbname)
for seg in segs:
hosts.append(seg[0].strip())
masters = get_master_hostname(dbname)
for master in masters:
hosts.append(master[0].strip())
return hosts
def get_pid_for_segment(seg_data_dir, seg_host):
cmd = Command(name='get list of postmaster processes',
cmdStr='ps -eaf | grep %s' % seg_data_dir,
ctxt=REMOTE,
remoteHost=seg_host)
cmd.run(validateAfter=True)
pid = None
results = cmd.get_results().stdout.strip().split('\n')
for res in results:
if 'grep' not in res:
pid = res.split()[1]
if pid is None:
return None
return int(pid)
def install_gppkg(context):
if 'GPPKG_PATH' not in os.environ:
raise Exception('GPPKG_PATH needs to be set in the environment to install gppkg')
if 'GPPKG_NAME' not in os.environ:
raise Exception('GPPKG_NAME needs to be set in the environment to install gppkg')
gppkg_path = os.environ['GPPKG_PATH']
gppkg_name = os.environ['GPPKG_NAME']
command = "gppkg --install %s/%s.gppkg" % (gppkg_path, gppkg_name)
run_command(context, command)
print "Install gppkg command: '%s', stdout: '%s', stderr: '%s'" % (command, context.stdout_message, context.error_message)
def enable_postgis_and_load_test_data_for_postgis_1(context):
if 'GPHOME' not in os.environ:
raise Exception('GPHOME needs to be set in the environment')
install_gppkg(context)
gphome = os.environ['GPHOME']
path = "%s/share/postgresql/contrib" % gphome
command = "psql -d opengeo -f %s/postgis.sql" % path
run_command(context, command)
command = "psql -d opengeo -f %s/spatial_ref_sys.sql" % path
run_command(context, command)
current_path = os.path.realpath(__file__)
current_dir = os.path.dirname(current_path)
postgis_data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % current_dir
command = "psql -d opengeo -f %s/nyc_census_blocks_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_neighborhoods_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_subway_stations_1.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_census_sociodata.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_streets_1.sql" % postgis_data_dir
run_command(context, command)
def enable_postgis_and_load_test_data(context):
if 'GPHOME' not in os.environ:
raise Exception('GPHOME needs to be set in the environment')
install_gppkg(context)
gphome = os.environ['GPHOME']
path = "%s/share/postgresql/contrib/postgis-2.0" % gphome
command = "psql -d opengeo -f %s/postgis.sql" % path
run_command(context, command)
command = "psql -d opengeo -f %s/spatial_ref_sys.sql" % path
run_command(context, command)
current_path = os.path.realpath(__file__)
current_dir = os.path.dirname(current_path)
postgis_data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % current_dir
command = "psql -d opengeo -f %s/nyc_census_blocks.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_neighborhoods.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_subway_stations.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_census_sociodata.sql" % postgis_data_dir
run_command(context, command)
command = "psql -d opengeo -f %s/nyc_streets.sql" % postgis_data_dir
run_command(context, command)
def kill_process(pid, host=None, sig=signal.SIGTERM):
if host is not None:
cmd = Command('kill process on a given host',
cmdStr='kill -%d %d' % (sig, pid),
ctxt=REMOTE,
remoteHost=host)
cmd.run(validateAfter=True)
else:
os.kill(pid, sig)
def get_num_segments(primary=True, mirror=True, master=True, standby=True):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segments = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
mirror_segments = [seg for seg in gparray.getDbList() if seg.isSegmentMirror()]
num_segments = 0
if primary:
num_segments += len(primary_segments)
if mirror:
num_segments += len(mirror_segments)
if master and gparray.master is not None:
num_segments += 1
if standby and gparray.standbyMaster is not None:
num_segments += 1
return num_segments
def check_user_permissions(file_name, access_mode):
st = os.stat(file_name)
if access_mode == 'write':
return bool(st.st_mode & stat.S_IWUSR)
elif access_mode == 'read':
return bool(st.st_mode & stat.S_IRUSR)
elif access_mode == 'execute':
return bool(st.st_mode & stat.S_IXUSR)
else:
raise Exception('Invalid mode specified, should be read, write or execute only')
def get_change_tracking_segment_info():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for seg in gparray.getDbList():
if seg.isSegmentModeInChangeLogging():
return seg.getSegmentPort(), seg.getSegmentHostName()
def are_segments_running():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.status != 'u':
return False
return True
def modify_sql_file(file, hostport):
if os.path.isfile(file):
for line in fileinput.FileInput(file,inplace=1):
if line.find("gpfdist")>=0:
line = re.sub('(\d+)\.(\d+)\.(\d+)\.(\d+)\:(\d+)',hostport, line)
print str(re.sub('\n','',line))
def create_gpfilespace_config(host, port, user,fs_name, config_file, working_dir='/tmp'):
mirror_hosts = []
primary_hosts = []
standby_host = ''
master_host = ''
fspath_master = working_dir + '/fs_master'
fspath_standby = working_dir + '/fs_standby'
fspath_primary = working_dir + '/fs_primary'
fspath_mirror = working_dir + '/fs_mirror'
get_master_filespace_entry = 'psql -t -h %s -p %s -U %s -d template1 -c \" select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content=-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_master_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_master_filespace_entry)
else:
file = open(config_file,'w')
file.write('filespace:%s\n'%fs_name)
result = out.split('\n')
for line in result:
if line.strip():
row = line.split('|')
row = [col.strip() for col in row]
hostname = row[0]
master_host = hostname
dbid = row[1]
fs_loc = os.path.join(fspath_master,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_standby_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content=-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_standby_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_standby_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
standby_host= hostname
dbid = row[1]
fs_loc = os.path.join(fspath_standby,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_primary_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content>-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_primary_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_primary_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
primary_hosts.append(hostname)
dbid = row[1]
fs_loc = os.path.join(fspath_primary,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
get_mirror_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content>-1;\"'%(host, port, user)
(rc, out, err) = run_cmd(get_mirror_filespace_entry)
if rc != 0:
raise Exception('Exception from executing psql query: %s'% get_mirror_filespace_entry)
else:
result = out.split('\n')
file = open(config_file,'a')
for line in result:
if line.strip():
row = line.strip().split('|')
row = [col.strip() for col in row]
hostname = row[0]
mirror_hosts.append(hostname)
dbid = row[1]
fs_loc = os.path.join(fspath_mirror,os.path.split(row[2])[1])
file.write(hostname+':'+dbid+':'+fs_loc)
file.write('\n')
file.close()
for host in primary_hosts:
remove_dir(host,fspath_primary)
create_dir(host,fspath_primary)
for host in mirror_hosts:
remove_dir(host,fspath_mirror)
create_dir(host,fspath_mirror)
remove_dir(master_host,fspath_master)
remove_dir(standby_host,fspath_standby)
create_dir(master_host,fspath_master)
create_dir(standby_host,fspath_standby)
def remove_dir(host, directory):
cmd = 'gpssh -h %s -e \'rm -rf %s\''%(host, directory)
run_cmd(cmd)
def create_dir(host, directory):
cmd = 'gpssh -h %s -e \'mkdir -p %s\''%(host, directory)
run_cmd(cmd)
def wait_till_change_tracking_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_ct_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'c\';"'%(host, port, user)
(rc, out, err) = run_cmd(num_ct_nodes)
if rc != 0:
raise Exception('Exception from executing psql query: %s'%num_ct_nodes)
else:
num_cl = int(out.strip())
count = 0
while(num_cl == 0):
time.sleep(30)
(rc, out, err) = run_cmd(num_ct_nodes)
num_cl = int(out.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in change tracking")
return (True,num_cl)
def wait_till_insync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_unsync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <> \'s\' or status<> \'u\';"'%(host, port, user)
(rc, out, err) = run_cmd(num_unsync_nodes)
if rc != 0:
raise Exception('Exception from executing psql query: %s'%num_unsync_nodes)
else:
num_unsync = int(out.strip())
count = 0
while(num_unsync > 0):
time.sleep(30)
(rc, out, err) = run_cmd(num_unsync_nodes)
num_unsync = int(out.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in sync transition")
return True
def wait_till_resync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
num_resync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'r\';"'%(host, port, user)
num_insync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <>\'s\';"'%(host, port, user)
(rc1, out1, err1) = run_cmd(num_resync_nodes)
(rc2, out2, err2) = run_cmd(num_insync_nodes)
if rc1 !=0 or rc2 !=0:
raise Exception('Exception from executing psql query: %s'%num_unsync_nodes)
else:
num_resync = int(out1.strip())
num_insync = int(out2.strip())
count = 0
while(num_resync != num_insync):
time.sleep(30)
(rc1, out1, err1) = run_cmd(num_resync_nodes)
(rc2, out2, err2) = run_cmd(num_insync_nodes)
num_resync = int(out1.strip())
num_insync = int(out2.strip())
count = count + 1
if (count > 80):
raise Exception("Timed out: cluster not in sync transition")
return True
def check_dump_dir_exists(context, dbname):
dir_map = get_backup_dirs_for_hosts(dbname)
cmd_str = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then echo \"$DIR EXISTS\"; else echo \"$DIR NOT FOUND\"; fi; done'"
for host in dir_map:
cmd = cmd_str % (host, " ".join(dir_map[host]))
run_command(context, cmd)
if context.exception:
raise context.exception
if 'EXISTS' in context.stdout_message:
raise Exception("db_dumps directory is present in master/segments.")
def verify_restored_table_is_analyzed(context, table_name, dbname):
ROW_COUNT_SQL = """SELECT count(*) FROM %s""" % table_name
if table_name.find('.') != -1:
schema_name,table_name = table_name.split(".")
else:
schema_name = 'public'
schema_name = pg.escape_string(schema_name)
table_name = pg.escape_string(table_name)
ROW_COUNT_PG_CLASS_SQL = """SELECT reltuples FROM pg_class WHERE relname = '%s'
AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '%s')""" % (table_name, schema_name)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, ROW_COUNT_SQL)
rows = curs.fetchall()
curs = dbconn.execSQL(conn, ROW_COUNT_PG_CLASS_SQL)
rows_from_pgclass = curs.fetchall()
if rows == rows_from_pgclass:
return True
else:
return False
def analyze_database(context, dbname):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, "analyze")
def delete_rows_from_table(context, dbname, table_name, column_name, info):
DELETE_SQL = """DELETE FROM %s WHERE %s = %s""" % (table_name, column_name, info)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, DELETE_SQL)
conn.commit()
def validate_parse_email_file(context, email_file_path):
if os.path.isfile(email_file_path) is False:
raise Exception("\'%s\' file does not exist." % email_file_path)
if email_file_path.split('.')[1] != "yaml":
raise Exception("\'%s\' is not \'.yaml\' file. File containing email details should be \'.yaml\' file." % email_file_path)
if (os.path.getsize(email_file_path) > 0) is False:
raise Exception("\'%s\' file is empty." % email_file_path)
email_key_list = ["DBNAME","FROM", "SUBJECT"]
try:
with open(email_file_path, 'r') as f:
doc = yaml.load(f)
context.email_details = doc['EMAIL_DETAILS']
for email in context.email_details:
for key in email.keys():
if key not in email_key_list:
raise Exception(" %s not present" % key)
except Exception as e:
raise Exception("\'%s\' file is not formatted properly." % email_file_path)
def check_count_for_specific_query(dbname, query, nrows):
NUM_ROWS_QUERY = '%s' % query
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
| {
"content_hash": "4e050683937c919adf0568832196172f",
"timestamp": "",
"source": "github",
"line_count": 1438,
"max_line_length": 246,
"avg_line_length": 44.50556328233658,
"alnum_prop": 0.6194315536180253,
"repo_name": "foyzur/gpdb",
"id": "86aefb7d2c44e4ff8f0fd72eb2ead89e0639e754",
"size": "64021",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/test/behave_utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11532"
},
{
"name": "C",
"bytes": "34176888"
},
{
"name": "C++",
"bytes": "4798415"
},
{
"name": "CMake",
"bytes": "28254"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Cucumber",
"bytes": "896816"
},
{
"name": "DTrace",
"bytes": "1154"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "Groff",
"bytes": "601878"
},
{
"name": "HTML",
"bytes": "340701"
},
{
"name": "Java",
"bytes": "943457"
},
{
"name": "Lex",
"bytes": "202575"
},
{
"name": "M4",
"bytes": "94554"
},
{
"name": "Makefile",
"bytes": "463264"
},
{
"name": "Objective-C",
"bytes": "7388"
},
{
"name": "PLSQL",
"bytes": "174787"
},
{
"name": "PLpgSQL",
"bytes": "47986854"
},
{
"name": "Perl",
"bytes": "778165"
},
{
"name": "Python",
"bytes": "5461340"
},
{
"name": "Ruby",
"bytes": "3283"
},
{
"name": "SQLPL",
"bytes": "122363"
},
{
"name": "Shell",
"bytes": "471931"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "471668"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.command.upload import upload
import os
class ReleaseToPyPICommand(upload):
def finalize_options(self):
self.repository = 'https://upload.pypi.org/legacy/'
self.username = os.environ['PYPI_USERNAME']
self.password = os.environ['PYPI_PASSWORD']
setup(
name='djangoevents',
version='0.14.1',
url='https://github.com/ApplauseOSS/djangoevents',
license='MIT',
description='Building blocks for building Event Sourcing Django applications.',
author='Applause',
author_email='[email protected]',
zip_safe=False,
packages=[
'djangoevents',
'djangoevents.migrations',
'djangoevents.tests.settings',
],
include_package_data=True,
install_requires=[
'eventsourcing>=1.2,<1.3',
'django',
'avro-python3==1.7.7',
'stringcase==1.0.6',
],
cmdclass={
'release_to_pypi': ReleaseToPyPICommand
}
)
| {
"content_hash": "6116b15a62174acb2b3324666c25817d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 25.736842105263158,
"alnum_prop": 0.6380368098159509,
"repo_name": "ApplauseOSS/djangoevents",
"id": "462bf0fc8cf0520dc87ccd38ac5112889e74c1a4",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "59572"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import struct
import time
import xxhash
import ctypes
import inspect
import os
import logging
from binascii import unhexlify
class ConstReflect(dict):
def __init__(self):
super(ConstReflect, self).__init__()
def determineRoutine(attribute):
return not(inspect.isroutine(attribute))
attributes = inspect.getmembers(type(self), determineRoutine)
for attribute in attributes:
if attribute[0].isupper():
self[attribute[1]] = attribute[0]
def setupLogger():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'Line %(lineno)d,%(filename)s- %(asctime)s- %(levelname)s- %(message)s'
)
ch.setFormatter(formatter)
logger.addHandler(ch)
def f2i(f):
return struct.unpack('<Q', struct.pack('<d', f))[0]
def f2h(f):
return hex(struct.unpack('<Q', struct.pack('<d', f))[0])
def h2f(h):
return struct.unpack('<d', struct.pack('<Q', int(h, 16)))[0]
def d2h(f):
hex_str = f2h(f)[2:].replace('L', '')
hex_str = ("0" * (len(hex_str) % 2)) + hex_str
return unhexlify(hex_str)
def encodeLocation(loc):
return (f2i(loc.latitude), f2i(loc.longitude), f2i(loc.altitude))
def getMs():
return int(round(time.time() * 1000))
def hashLocation(authTicket, latitude, longitude, altitude):
baseHash = xxhash.xxh32(
authTicket.SerializeToString(),
seed=0x1B845238
).intdigest()
# Format location
locationBytes = d2h(latitude) + d2h(longitude) + d2h(altitude)
# Using serialized Auth Ticket
hashA = xxhash.xxh32(locationBytes, seed=baseHash).intdigest()
# Hash of location using static seed 0x1B845238
hashB = xxhash.xxh32(locationBytes, seed=0x1B845238).intdigest()
return hashA, hashB
def hashRequests(authTicket, payload):
baseHash = xxhash.xxh64(
authTicket.SerializeToString(),
seed=0x1B845238
).intdigest()
# Serialize and hash each request
return [xxhash.xxh64(
request.SerializeToString(),
seed=baseHash
).intdigest() for request in payload]
# Assuming the encrypt.dll file floating around out there
def hashSignature(signature, libraryPath):
serialized = signature.SerializeToString()
size = len(serialized)
library = ctypes.cdll.LoadLibrary(libraryPath)
library.argtypes = [
ctypes.c_char_p, # const unsigned char *input
ctypes.c_size_t, # size_t input_size
ctypes.c_char_p, # const unsigned char *iv
ctypes.c_size_t, # size_t *iv_size
ctypes.POINTER(ctypes.c_ubyte), # unsigned char * output
ctypes.POINTER(ctypes.c_size_t) # size_t* output_size
]
library.restype = ctypes.c_int # Retun int
iv = os.urandom(32)
outputSize = ctypes.c_size_t()
# Hash sig
library.encrypt(serialized, size, iv, 32, None, ctypes.byref(outputSize))
output = (ctypes.c_ubyte * outputSize.value)()
# Call lib
library.encrypt(
serialized,
size,
iv,
32,
ctypes.byref(output),
ctypes.byref(outputSize)
)
return b''.join(map(chr, output))
| {
"content_hash": "b0a7e3f721acc548177d5837392ae98a",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 29.05298013245033,
"alnum_prop": 0.6826988830635969,
"repo_name": "rubenvereecken/pokemongo-api",
"id": "fc8ce0412c40ae4a2c3d31cc21a2cea99c1887e6",
"size": "4387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pogo/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76736"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
"""Review Board feature checkers."""
from __future__ import unicode_literals
from djblets.features.checkers import SiteConfigFeatureChecker
class RBFeatureChecker(SiteConfigFeatureChecker):
"""Feature checker that checks against a LocalSite's configuration.
Features can be enabled/disabled on a per-LocalSite basis by setting
the specified feature ID to either ``True`` or ``False`` in the
``enabled_features`` key in that LocalSite's
:py:attr:`~reviewboard.sites.models.LocalSite.extra_data`` field.
If the key is absent, this checker will check against the site
configuration (and then the Django settings) to see if it is enabled or
disabled globally.
"""
EXTRA_DATA_KEY = SiteConfigFeatureChecker.siteconfig_key
def is_feature_enabled(self, feature_id, **kwargs):
"""Return whether a feature is enabled for a given ID.
Args:
feature_id (unicode):
The unique identifier of the feature whose status is to be
determined.
**kwargs (dict):
Additional keyword arguments.
Keyword Args:
request (django.http.HttpRequest):
An optional request. If this request is made against a
LocalSite, that LocalSite will be used to look up the feature.
Either this argument or ``local_site`` must be provided to
enable checking against a LocalSite.
local_site (reviewboard.site.models.LocalSite):
An optional local site. If provided, this LocalSite will be
used to look up the status of the requested feature.
Either this argument or ``request`` must be provided to enable
checking against a LocalSite.
Returns:
bool:
Whether or not the feature is enabled.
"""
local_site = kwargs.get('local_site')
request = kwargs.get('request')
if (local_site is None and
request is not None and
hasattr(request, 'local_site')):
local_site = request.local_site
if local_site and local_site.extra_data:
try:
return local_site.extra_data[self.EXTRA_DATA_KEY][feature_id]
except KeyError:
pass
return super(RBFeatureChecker, self).is_feature_enabled(feature_id,
**kwargs)
| {
"content_hash": "7e8a539fe2544983a671cab7f321600b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 36.544117647058826,
"alnum_prop": 0.6120724346076458,
"repo_name": "brennie/reviewboard",
"id": "df595097e4f68c115b6d42ba4ee05147e93b6134",
"size": "2485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/features/checkers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "247208"
},
{
"name": "HTML",
"bytes": "204351"
},
{
"name": "JavaScript",
"bytes": "2557855"
},
{
"name": "Python",
"bytes": "5241630"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
import json
from django.template.loader import render_to_string
__version__ = (0, 1, 2)
class SirTrevorContent(str):
@property
def html(self):
html = []
content = json.loads(self)
for block in content['data']:
template_name = 'sirtrevor/blocks/%s.html' % block['type']
html.append(render_to_string(template_name, block['data']))
return ''.join(html)
| {
"content_hash": "e8a6830774896124d6fbfc0842da6b4c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 26.125,
"alnum_prop": 0.6028708133971292,
"repo_name": "Tarun12345/rat-notes",
"id": "11c9091764581b7006e84bc2d346a653086b3fd3",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sirtrevor/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70695"
},
{
"name": "JavaScript",
"bytes": "85493"
},
{
"name": "Python",
"bytes": "87237"
},
{
"name": "Shell",
"bytes": "6714"
}
],
"symlink_target": ""
} |
"""This module exports the Htmlhint plugin class."""
import logging
import sublime
from SublimeLinter.lint import LintMatch, NodeLinter
logger = logging.getLogger("SublimeLinter.plugin.htmlhint")
class Htmlhint(NodeLinter):
"""Provides an interface to htmlhint."""
cmd = ("htmlhint", "--format", "json", "--nocolor", "stdin")
defaults = {"selector": "text.html"}
def find_errors(self, output):
"""
Override find_errors, parsing output json into json_object.
Calls parse_message for each error found.
"""
output_json = sublime.decode_value(output)
logger.debug('output_json:"%s", file: "%s"', output_json, self.filename)
for file in output_json:
for message in file["messages"]:
yield self.parse_message(message)
def parse_message(self, message):
"""Parse message object into standard elements of an error and return them."""
error_message = message["message"]
line = message["line"] - 1
col = message["col"]
error_type = message["type"]
# ignore message type of info
if error_type == "info":
message = None
logger.info(
'message -- msg:"%s", line:%s, col:%s, type: %s, message_obj:%s',
error_message,
line,
col,
error_type,
message,
)
return LintMatch(
filename=self.filename,
line=line,
col=col,
error_type=error_type,
code=message.get("rule", {}).get("id", ""),
message=error_message,
match=str(message),
)
| {
"content_hash": "b3f0315f35129f7f61f2c037580e023e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 28.066666666666666,
"alnum_prop": 0.5605700712589073,
"repo_name": "mmaday/SublimeLinter-contrib-htmlhint",
"id": "b63018bbc33b13a7bf41d5ed6dd58bbd35016b7f",
"size": "1850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2036"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2014 - 2016 Mohab Usama
"""
import logging
VERSION = '0.11'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.handlers = [logging.StreamHandler()]
| {
"content_hash": "c26c0e4cd9279636d120202da9d83e5d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 43,
"avg_line_length": 14.466666666666667,
"alnum_prop": 0.7004608294930875,
"repo_name": "mohabusama/pyguacamole",
"id": "10e28abb06963440c172e3f2f2fef0987f1c59a0",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guacamole/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22138"
}
],
"symlink_target": ""
} |
listen_ip = '' # empty stays for 0.0.0.0
listen_port = 9090
connections = 30
target_server = 'kirito.la.net.ua'
from gevent.pywsgi import WSGIServer
from gevent.pool import Pool
from m3u8_streamer import M3u8Streamer
from urlparse import urljoin
class ResultWrapper(object):
def __init__(self, data, close):
self._data = data
self.close = close
def __str__(self):
return self._data
def __iter__(self):
for chunk in self._data:
yield chunk
def __len__(self):
return self._data.__len__()
def application(env, start_response):
start_response('200 ok', [('Content-type', 'video/MP2T'), ('Connection', 'Keep-alive')])
streamer = M3u8Streamer(urljoin(
'http://' + target_server,
env['PATH_INFO'].replace('.ts', '.m3u8'))
)
for data in streamer.iter_content():
print '-----------get {} bytes'.format(len(data))
if data:
yield ResultWrapper(data, streamer.stop)
pass
if __name__ == '__main__':
print 'Starting on {}:{}'.format(listen_ip, listen_port)
WSGIServer((listen_ip, listen_port), application, spawn=Pool(connections)).serve_forever()
| {
"content_hash": "50aca115ac2df85825b6fc162ab04107",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 94,
"avg_line_length": 26.954545454545453,
"alnum_prop": 0.6104553119730185,
"repo_name": "rain87/hls-glue",
"id": "87dff38df61ad9cdb539452ef6ca9f5f3d35405e",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/hls_glue.gevent.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "494"
},
{
"name": "Python",
"bytes": "7929"
}
],
"symlink_target": ""
} |
from django.db import models
from django.core.validators import validate_unicode_slug
import sys
from functools import partial
from collections import defaultdict
from abc import ABC, abstractmethod
from django.core.exceptions import ValidationError
from django.utils import timezone
from datetime import timedelta
class User(models.Model):
name = models.CharField(max_length=128, primary_key=True,
validators=[validate_unicode_slug])
def __str__(self):
return self.name
class Asado(models.Model):
organizer = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='Organizador'
)
attendee = models.ManyToManyField(
User,
through='Invitation',
related_name='Invitados'
)
datetime = models.DateTimeField()
place = models.CharField(max_length=128, default='')
@property
def estimated_cost(self):
estimated_by_items = sum(
[assignment.estimated_cost() for assignment in
self.shop_list.all()]
)
return "$ %s" % estimated_by_items
class Meta:
ordering = ['datetime']
DATETIME_VALIDATION_ERROR = 'La fecha no debe haber pasado'
def clean(self):
if self.datetime < timezone.now():
raise ValidationError(
{'datetime': self.__class__.DATETIME_VALIDATION_ERROR}
)
def save(self, *args, **kwargs):
self.full_clean()
super().save(*args, **kwargs)
class Invitation(models.Model):
ORGANIZER_AS_INVITE_ERROR_MSG = 'Organizer cannot be invited'
invite = models.ForeignKey(User, on_delete=models.CASCADE)
asado = models.ForeignKey(Asado, on_delete=models.CASCADE)
def clean(self):
if self.invite == self.asado.organizer:
raise ValidationError(
{'invite': self.__class__.ORGANIZER_AS_INVITE_ERROR_MSG}
)
def save(self, *args, **kwargs):
self.full_clean()
return super().save(*args, **kwargs)
class Supply(models.Model):
VALID_OPTIONS = (
('drink', 'Drink'),
('food', 'Food'),
)
kind = models.CharField(max_length=64, choices=VALID_OPTIONS)
description = models.CharField(max_length=128, unique=True)
estimated_cost = models.DecimalField(max_digits=7, decimal_places=2)
@property
def cost(self):
return "$%s" % self.estimated_cost
def __str__(self):
return self.description + ' ' + self.cost
class Assignment(models.Model):
designated_user = models.ForeignKey(User, on_delete=models.CASCADE)
comment = models.CharField(max_length=256, default='')
fullfilled = models.BooleanField(default=False)
asado = models.ForeignKey(Asado, related_name='shop_list')
required_supply = models.ForeignKey(Supply, on_delete=models.CASCADE)
required_quantity = models.IntegerField(default=0)
def estimated_cost(self):
return self.required_quantity * self.required_supply.estimated_cost
def finished_with(self, quantity):
rule = AssignmentValidationRule.rule_for(
self.required_supply
)
self.fullfilled = rule.fullfills_for(self, quantity)
self.save()
class AssignmentValidationError(Exception):
def __init__(self, message):
self.message = message
class AssignmentValidationRule(ABC):
@classmethod
def rule_for(cls, a_supply):
try:
return next(
rule for rule in cls.__subclasses__() if
rule.works_for(a_supply)
)
except StopIteration as e:
raise AssignmentValidationError(
'You must provide a rule for'
'this kind of supply ' + a_kind_of_supply
)
@classmethod
@abstractmethod
def works_for(a_supply):
pass
@abstractmethod
def fullfills_for(an_assignment, a_quantity):
pass
class FoodAssignmentValidationRule(AssignmentValidationRule):
def works_for(a_supply):
return a_supply.kind == 'food'
def fullfills_for(an_assignment, a_quantity):
if a_quantity == an_assignment.required_quantity:
return True
else:
ERROR_MESSAGE = (
'Deben confirmarse todas las unidas requeridas de comida'
)
raise AssignmentValidationError(ERROR_MESSAGE)
class DrinkAssignmentValidationRule(AssignmentValidationRule):
def works_for(a_supply):
return a_supply.kind == 'drink'
def fullfills_for(an_assignment, a_quantity):
return True
| {
"content_hash": "8c407f344ca4f95408d51990a7537c92",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 75,
"avg_line_length": 28.337423312883434,
"alnum_prop": 0.6332539510716605,
"repo_name": "TenStrings/organizador-de-asados",
"id": "2e31228d7eaa450217138ffe8f28b525e9b10d95",
"size": "4619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asados/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "977"
},
{
"name": "HTML",
"bytes": "7152"
},
{
"name": "JavaScript",
"bytes": "310"
},
{
"name": "Python",
"bytes": "34651"
}
],
"symlink_target": ""
} |
import os
import re
import json
import requests
import signal
import httpretty
from freezegun import freeze_time
from contextlib import contextmanager
from sure import within, miliseconds, expect
from tornado import version as tornado_version
from httpretty import HTTPretty, httprettified
from httpretty.core import decode_utf8
from tests.functional.base import FIXTURE_FILE, use_tornado_server
from tests.compat import Mock
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
server_url = lambda path, port: "http://localhost:{}/{}".format(port, path.lstrip('/'))
@httprettified
@within(two=miliseconds)
def test_httpretty_should_mock_a_simple_get_with_requests_read(now):
"HTTPretty should mock a simple GET with requests.get"
HTTPretty.register_uri(HTTPretty.GET, "http://yipit.com/",
body="Find the best daily deals")
response = requests.get('http://yipit.com')
expect(response.text).to.equal('Find the best daily deals')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/')
@httprettified
@within(two=miliseconds)
def test_hostname_case_insensitive(now):
"HTTPretty should match the hostname case insensitive"
HTTPretty.register_uri(HTTPretty.GET, "http://yipit/",
body="Find the best daily deals")
response = requests.get('http://YIPIT')
expect(response.text).to.equal('Find the best daily deals')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/')
@httprettified
@within(two=miliseconds)
def test_httpretty_provides_easy_access_to_querystrings(now):
"HTTPretty should provide an easy access to the querystring"
HTTPretty.register_uri(HTTPretty.GET, "http://yipit.com/",
body="Find the best daily deals")
requests.get('http://yipit.com/?foo=bar&foo=baz&chuck=norris')
expect(HTTPretty.last_request.querystring).to.equal({
'foo': ['bar', 'baz'],
'chuck': ['norris'],
})
@httprettified
@freeze_time("2013-10-04 04:20:00")
def test_httpretty_should_mock_headers_requests():
"HTTPretty should mock basic headers with requests"
HTTPretty.register_uri(HTTPretty.GET, "http://github.com/",
body="this is supposed to be the response",
status=201)
response = requests.get('http://github.com')
expect(response.status_code).to.equal(201)
expect(dict(response.headers)).to.equal({
'content-type': 'text/plain; charset=utf-8',
'connection': 'close',
'content-length': '35',
'status': '201',
'server': 'Python/HTTPretty',
'date': 'Fri, 04 Oct 2013 04:20:00 GMT',
})
@httprettified
@freeze_time("2013-10-04 04:20:00")
def test_httpretty_should_allow_adding_and_overwritting_requests():
"HTTPretty should allow adding and overwritting headers with requests"
HTTPretty.register_uri(HTTPretty.GET, "http://github.com/foo",
body="this is supposed to be the response",
adding_headers={
'Server': 'Apache',
'Content-Length': '27',
'Content-Type': 'application/json',
})
response = requests.get('http://github.com/foo')
expect(dict(response.headers)).to.equal({
'content-type': 'application/json',
'connection': 'close',
'content-length': '27',
'status': '200',
'server': 'Apache',
'date': 'Fri, 04 Oct 2013 04:20:00 GMT',
})
@httprettified
@within(two=miliseconds)
def test_httpretty_should_allow_forcing_headers_requests(now):
"HTTPretty should allow forcing headers with requests"
HTTPretty.register_uri(HTTPretty.GET, "http://github.com/foo",
body="<root><baz /</root>",
forcing_headers={
'Content-Type': 'application/xml',
'Content-Length': '19',
})
response = requests.get('http://github.com/foo')
expect(dict(response.headers)).to.equal({
'content-type': 'application/xml',
'content-length': '19',
})
@httprettified
@freeze_time("2013-10-04 04:20:00")
def test_httpretty_should_allow_adding_and_overwritting_by_kwargs_u2():
"HTTPretty should allow adding and overwritting headers by keyword args " \
"with requests"
HTTPretty.register_uri(HTTPretty.GET, "http://github.com/foo",
body="this is supposed to be the response",
server='Apache',
content_length='27',
content_type='application/json')
response = requests.get('http://github.com/foo')
expect(dict(response.headers)).to.equal({
'content-type': 'application/json',
'connection': 'close',
'content-length': '27',
'status': '200',
'server': 'Apache',
'date': 'Fri, 04 Oct 2013 04:20:00 GMT',
})
@httprettified
@within(two=miliseconds)
def test_rotating_responses_with_requests(now):
"HTTPretty should support rotating responses with requests"
HTTPretty.register_uri(
HTTPretty.GET, "https://api.yahoo.com/test",
responses=[
HTTPretty.Response(body=b"first response", status=201),
HTTPretty.Response(body=b'second and last response', status=202),
])
response1 = requests.get(
'https://api.yahoo.com/test')
expect(response1.status_code).to.equal(201)
expect(response1.text).to.equal('first response')
response2 = requests.get(
'https://api.yahoo.com/test')
expect(response2.status_code).to.equal(202)
expect(response2.text).to.equal('second and last response')
response3 = requests.get(
'https://api.yahoo.com/test')
expect(response3.status_code).to.equal(202)
expect(response3.text).to.equal('second and last response')
@httprettified
@within(two=miliseconds)
def test_can_inspect_last_request(now):
"HTTPretty.last_request is a mimetools.Message request from last match"
HTTPretty.register_uri(HTTPretty.POST, "http://api.github.com/",
body='{"repositories": ["HTTPretty", "lettuce"]}')
response = requests.post(
'http://api.github.com',
'{"username": "gabrielfalcao"}',
headers={
'content-type': 'text/json',
},
)
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.body).to.equal(
b'{"username": "gabrielfalcao"}',
)
expect(HTTPretty.last_request.headers['content-type']).to.equal(
'text/json',
)
expect(response.json()).to.equal({"repositories": ["HTTPretty", "lettuce"]})
@httprettified
@within(two=miliseconds)
def test_can_inspect_last_request_with_ssl(now):
"HTTPretty.last_request is recorded even when mocking 'https' (SSL)"
HTTPretty.register_uri(HTTPretty.POST, "https://secure.github.com/",
body='{"repositories": ["HTTPretty", "lettuce"]}')
response = requests.post(
'https://secure.github.com',
'{"username": "gabrielfalcao"}',
headers={
'content-type': 'text/json',
},
)
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.body).to.equal(
b'{"username": "gabrielfalcao"}',
)
expect(HTTPretty.last_request.headers['content-type']).to.equal(
'text/json',
)
expect(response.json()).to.equal({"repositories": ["HTTPretty", "lettuce"]})
@httprettified
@within(two=miliseconds)
def test_httpretty_ignores_querystrings_from_registered_uri(now):
"HTTPretty should ignore querystrings from the registered uri (requests library)"
HTTPretty.register_uri(HTTPretty.GET, "http://yipit.com/?id=123",
body=b"Find the best daily deals")
response = requests.get('http://yipit.com/', params={'id': 123})
expect(response.text).to.equal('Find the best daily deals')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/?id=123')
@httprettified
@within(five=miliseconds)
def test_streaming_responses(now):
"""
Mock a streaming HTTP response, like those returned by the Twitter streaming
API.
"""
@contextmanager
def in_time(time, message):
"""
A context manager that uses signals to force a time limit in tests
(unlike the `@within` decorator, which only complains afterward), or
raise an AssertionError.
"""
def handler(signum, frame):
raise AssertionError(message)
signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, time)
yield
signal.setitimer(signal.ITIMER_REAL, 0)
# XXX this obviously isn't a fully functional twitter streaming client!
twitter_response_lines = [
b'{"text":"If \\"for the boobs\\" requests to follow me one more time I\'m calling the police. http://t.co/a0mDEAD8"}\r\n',
b'\r\n',
b'{"text":"RT @onedirection: Thanks for all your # FollowMe1D requests Directioners! We\u2019ll be following 10 people throughout the day starting NOW. G ..."}\r\n'
]
TWITTER_STREAMING_URL = "https://stream.twitter.com/1/statuses/filter.json"
HTTPretty.register_uri(HTTPretty.POST, TWITTER_STREAMING_URL,
body=(l for l in twitter_response_lines),
streaming=True)
# taken from the requests docs
# test iterating by line
# Http://docs.python-requests.org/en/latest/user/advanced/# streaming-requests
response = requests.post(TWITTER_STREAMING_URL, data={'track': 'requests'},
auth=('username', 'password'), stream=True)
line_iter = response.iter_lines()
with in_time(0.01, 'Iterating by line is taking forever!'):
for i in range(len(twitter_response_lines)):
expect(next(line_iter).strip()).to.equal(
twitter_response_lines[i].strip())
HTTPretty.register_uri(HTTPretty.POST, TWITTER_STREAMING_URL,
body=(l for l in twitter_response_lines),
streaming=True)
# test iterating by line after a second request
response = requests.post(
TWITTER_STREAMING_URL,
data={
'track': 'requests'
},
auth=('username', 'password'),
stream=True,
)
line_iter = response.iter_lines()
with in_time(0.01, 'Iterating by line is taking forever the second time '
'around!'):
for i in range(len(twitter_response_lines)):
expect(next(line_iter).strip()).to.equal(
twitter_response_lines[i].strip())
HTTPretty.register_uri(HTTPretty.POST, TWITTER_STREAMING_URL,
body=(l for l in twitter_response_lines),
streaming=True)
# test iterating by char
response = requests.post(
TWITTER_STREAMING_URL,
data={
'track': 'requests'
},
auth=('username', 'password'),
stream=True
)
twitter_expected_response_body = b''.join(twitter_response_lines)
with in_time(0.02, 'Iterating by char is taking forever!'):
twitter_body = b''.join(c for c in response.iter_content(chunk_size=1))
expect(twitter_body).to.equal(twitter_expected_response_body)
# test iterating by chunks larger than the stream
HTTPretty.register_uri(HTTPretty.POST, TWITTER_STREAMING_URL,
body=(l for l in twitter_response_lines),
streaming=True)
response = requests.post(TWITTER_STREAMING_URL, data={'track': 'requests'},
auth=('username', 'password'), stream=True)
with in_time(0.02, 'Iterating by large chunks is taking forever!'):
twitter_body = b''.join(c for c in
response.iter_content(chunk_size=1024))
expect(twitter_body).to.equal(twitter_expected_response_body)
@httprettified
def test_multiline():
url = 'https://httpbin.org/post'
data = b'content=Im\r\na multiline\r\n\r\nsentence\r\n'
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
'Accept': 'text/plain',
}
HTTPretty.register_uri(
HTTPretty.POST,
url,
)
response = requests.post(url, data=data, headers=headers)
expect(response.status_code).to.equal(200)
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.url).to.equal('https://httpbin.org/post')
expect(HTTPretty.last_request.protocol).to.equal('https')
expect(HTTPretty.last_request.path).to.equal('/post')
expect(HTTPretty.last_request.body).to.equal(data)
expect(HTTPretty.last_request.headers['content-length']).to.equal('37')
expect(HTTPretty.last_request.headers['content-type']).to.equal('application/x-www-form-urlencoded; charset=utf-8')
expect(len(HTTPretty.latest_requests)).to.equal(1)
@httprettified
def test_octet_stream():
url = 'https://httpbin.org/post'
data = b"\xf5\x00\x00\x00" # utf-8 with invalid start byte
headers = {
'Content-Type': 'application/octet-stream',
}
HTTPretty.register_uri(
HTTPretty.POST,
url,
)
response = requests.post(url, data=data, headers=headers)
expect(response.status_code).to.equal(200)
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.url).to.equal('https://httpbin.org/post')
expect(HTTPretty.last_request.protocol).to.equal('https')
expect(HTTPretty.last_request.path).to.equal('/post')
expect(HTTPretty.last_request.body).to.equal(data)
expect(HTTPretty.last_request.headers['content-length']).to.equal('4')
expect(HTTPretty.last_request.headers['content-type']).to.equal('application/octet-stream')
expect(len(HTTPretty.latest_requests)).to.equal(1)
@httprettified
def test_multipart():
url = 'https://httpbin.org/post'
data = b'--xXXxXXyYYzzz\r\nContent-Disposition: form-data; name="content"\r\nContent-Type: text/plain; charset=utf-8\r\nContent-Length: 68\r\n\r\nAction: comment\nText: Comment with attach\nAttachment: x1.txt, x2.txt\r\n--xXXxXXyYYzzz\r\nContent-Disposition: form-data; name="attachment_2"; filename="x.txt"\r\nContent-Type: text/plain\r\nContent-Length: 4\r\n\r\nbye\n\r\n--xXXxXXyYYzzz\r\nContent-Disposition: form-data; name="attachment_1"; filename="x.txt"\r\nContent-Type: text/plain\r\nContent-Length: 4\r\n\r\nbye\n\r\n--xXXxXXyYYzzz--\r\n'
headers = {'Content-Length': '495', 'Content-Type': 'multipart/form-data; boundary=xXXxXXyYYzzz', 'Accept': 'text/plain'}
HTTPretty.register_uri(
HTTPretty.POST,
url,
)
response = requests.post(url, data=data, headers=headers)
expect(response.status_code).to.equal(200)
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.url).to.equal('https://httpbin.org/post')
expect(HTTPretty.last_request.protocol).to.equal('https')
expect(HTTPretty.last_request.path).to.equal('/post')
expect(HTTPretty.last_request.body).to.equal(data)
expect(HTTPretty.last_request.headers['content-length']).to.equal('495')
expect(HTTPretty.last_request.headers['content-type']).to.equal('multipart/form-data; boundary=xXXxXXyYYzzz')
expect(len(HTTPretty.latest_requests)).to.equal(1)
@httprettified
@within(two=miliseconds)
def test_callback_response(now):
("HTTPretty should call a callback function and set its return value as the body of the response"
" requests")
def request_callback(request, uri, headers):
return [200, headers, "The {} response from {}".format(decode_utf8(request.method), uri)]
HTTPretty.register_uri(
HTTPretty.GET, "https://api.yahoo.com/test",
body=request_callback)
response = requests.get('https://api.yahoo.com/test')
expect(response.text).to.equal("The GET response from https://api.yahoo.com/test")
HTTPretty.register_uri(
HTTPretty.POST, "https://api.yahoo.com/test_post",
body=request_callback)
response = requests.post(
"https://api.yahoo.com/test_post",
{"username": "gabrielfalcao"}
)
expect(response.text).to.equal("The POST response from https://api.yahoo.com/test_post")
@httprettified
@within(two=miliseconds)
def test_callback_body_remains_callable_for_any_subsequent_requests(now):
("HTTPretty should call a callback function more than one"
" requests")
def request_callback(request, uri, headers):
return [200, headers, "The {} response from {}".format(decode_utf8(request.method), uri)]
HTTPretty.register_uri(
HTTPretty.GET, "https://api.yahoo.com/test",
body=request_callback)
response = requests.get('https://api.yahoo.com/test')
expect(response.text).to.equal("The GET response from https://api.yahoo.com/test")
response = requests.get('https://api.yahoo.com/test')
expect(response.text).to.equal("The GET response from https://api.yahoo.com/test")
@httprettified
@within(two=miliseconds)
def test_callback_setting_headers_and_status_response(now):
("HTTPretty should call a callback function and uses it retur tuple as status code, headers and body"
" requests")
def request_callback(request, uri, headers):
headers.update({'a': 'b'})
return [418, headers, "The {} response from {}".format(decode_utf8(request.method), uri)]
HTTPretty.register_uri(
HTTPretty.GET, "https://api.yahoo.com/test",
body=request_callback)
response = requests.get('https://api.yahoo.com/test')
expect(response.text).to.equal("The GET response from https://api.yahoo.com/test")
expect(response.headers).to.have.key('a').being.equal("b")
expect(response.status_code).to.equal(418)
HTTPretty.register_uri(
HTTPretty.POST, "https://api.yahoo.com/test_post",
body=request_callback)
response = requests.post(
"https://api.yahoo.com/test_post",
{"username": "gabrielfalcao"}
)
expect(response.text).to.equal("The POST response from https://api.yahoo.com/test_post")
expect(response.headers).to.have.key('a').being.equal("b")
expect(response.status_code).to.equal(418)
@httprettified
def test_httpretty_should_respect_matcher_priority():
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r".*"),
body='high priority',
priority=5,
)
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r".+"),
body='low priority',
priority=0,
)
response = requests.get('http://api.yipit.com/v1/')
expect(response.text).to.equal('high priority')
@httprettified
@within(two=miliseconds)
def test_callback_setting_content_length_on_head(now):
("HTTPretty should call a callback function, use it's return tuple as status code, headers and body"
" requests and respect the content-length header when responding to HEAD")
def request_callback(request, uri, headers):
headers.update({'content-length': 12345})
return [200, headers, ""]
HTTPretty.register_uri(
HTTPretty.HEAD, "https://api.yahoo.com/test",
body=request_callback)
response = requests.head('https://api.yahoo.com/test')
expect(response.headers).to.have.key('content-length').being.equal("12345")
expect(response.status_code).to.equal(200)
@httprettified
def test_httpretty_should_allow_registering_regexes_and_give_a_proper_match_to_the_callback():
"HTTPretty should allow registering regexes with requests and giva a proper match to the callback"
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"https://api.yipit.com/v1/deal;brand=(?P<brand_name>\w+)"),
body=lambda method, uri, headers: [200, headers, uri]
)
response = requests.get('https://api.yipit.com/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(response.text).to.equal('https://api.yipit.com/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal;brand=gap?first_name=chuck&last_name=norris')
@httprettified
def test_httpretty_should_allow_registering_regexes():
"HTTPretty should allow registering regexes with requests"
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"https://api.yipit.com/v1/deal;brand=(?P<brand_name>\w+)"),
body="Found brand",
)
response = requests.get('https://api.yipit.com/v1/deal;brand=gap?first_name=chuck&last_name=norris'
)
expect(response.text).to.equal('Found brand')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal;brand=gap?first_name=chuck&last_name=norris')
@httprettified
def test_httpretty_provides_easy_access_to_querystrings_with_regexes():
"HTTPretty should match regexes even if they have a different querystring"
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"https://api.yipit.com/v1/(?P<endpoint>\w+)/$"),
body="Find the best daily deals"
)
response = requests.get('https://api.yipit.com/v1/deals/?foo=bar&foo=baz&chuck=norris')
expect(response.text).to.equal("Find the best daily deals")
expect(HTTPretty.last_request.querystring).to.equal({
'foo': ['bar', 'baz'],
'chuck': ['norris'],
})
@httprettified(verbose=True)
def test_httpretty_allows_to_chose_if_querystring_should_be_matched():
"HTTPretty should provide a way to not match regexes that have a different querystring"
HTTPretty.register_uri(
HTTPretty.GET,
"http://localhost:9090",
)
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"http://localhost:9090/what/?$"),
body="Nudge, nudge, wink, wink. Know what I mean?",
match_querystring=True
)
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"http://localhost:9090/what.*[?]?.*"),
body="Different",
match_querystring=False
)
response = requests.get('http://localhost:9090/what/')
expect(response.text).to.equal('Nudge, nudge, wink, wink. Know what I mean?')
response = requests.get('http://localhost:9090/what/', params={'flying': 'coconuts'})
expect(response.text).to.not_be.equal('Nudge, nudge, wink, wink. Know what I mean?')
@httprettified
def test_httpretty_should_allow_multiple_methods_for_the_same_uri():
"HTTPretty should allow registering multiple methods for the same uri"
url = 'http://test.com/test'
methods = ['GET', 'POST', 'PUT', 'OPTIONS']
for method in methods:
HTTPretty.register_uri(
getattr(HTTPretty, method),
url,
method
)
for method in methods:
request_action = getattr(requests, method.lower())
expect(request_action(url).text).to.equal(method)
@httprettified
def test_httpretty_should_allow_registering_regexes_with_streaming_responses():
"HTTPretty should allow registering regexes with streaming responses"
os.environ['DEBUG'] = 'true'
def my_callback(request, url, headers):
request.body.should.equal(b'hithere')
return 200, headers, "Received"
HTTPretty.register_uri(
HTTPretty.POST,
re.compile(r"https://api.yipit.com/v1/deal;brand=(?P<brand_name>\w+)"),
body=my_callback,
)
def gen():
yield b'hi'
yield b'there'
response = requests.post(
'https://api.yipit.com/v1/deal;brand=gap?first_name=chuck&last_name=norris',
data=gen(),
)
expect(response.content).to.equal(b"Received")
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.path).to.equal('/v1/deal;brand=gap?first_name=chuck&last_name=norris')
@httprettified
def test_httpretty_should_allow_multiple_responses_with_multiple_methods():
"HTTPretty should allow multiple responses when binding multiple methods to the same uri"
url = 'http://test.com/list'
# add get responses
HTTPretty.register_uri(
HTTPretty.GET, url,
responses=[
HTTPretty.Response(body='a'),
HTTPretty.Response(body='b'),
]
)
# add post responses
HTTPretty.register_uri(
HTTPretty.POST, url,
responses=[
HTTPretty.Response(body='c'),
HTTPretty.Response(body='d'),
]
)
expect(requests.get(url).text).to.equal('a')
expect(requests.post(url).text).to.equal('c')
expect(requests.get(url).text).to.equal('b')
expect(requests.get(url).text).to.equal('b')
expect(requests.get(url).text).to.equal('b')
expect(requests.post(url).text).to.equal('d')
expect(requests.post(url).text).to.equal('d')
expect(requests.post(url).text).to.equal('d')
@httprettified
def test_httpretty_should_normalize_url_patching():
"HTTPretty should normalize all url patching"
HTTPretty.register_uri(
HTTPretty.GET,
"http://yipit.com/foo(bar)",
body="Find the best daily deals")
response = requests.get('http://yipit.com/foo%28bar%29')
expect(response.text).to.equal('Find the best daily deals')
@httprettified
def test_lack_of_trailing_slash():
("HTTPretty should automatically append a slash to given urls")
url = 'http://www.youtube.com'
HTTPretty.register_uri(HTTPretty.GET, url, body='')
response = requests.get(url)
response.status_code.should.equal(200)
@httprettified
def test_unicode_querystrings():
("Querystrings should accept unicode characters")
HTTPretty.register_uri(HTTPretty.GET, "http://yipit.com/login",
body="Find the best daily deals")
requests.get('http://yipit.com/login?user=Gabriel+Falcão')
expect(HTTPretty.last_request.querystring['user'][0]).should.be.equal('Gabriel Falcão')
@use_tornado_server
def test_recording_calls(port):
("HTTPretty should be able to record calls")
# Given a destination path:
destination = FIXTURE_FILE("recording-1.json")
# When I record some calls
with HTTPretty.record(destination):
requests.get(server_url("/foobar?name=Gabriel&age=25", port))
requests.post(server_url("/foobar", port),
data=json.dumps({'test': '123'}),
headers={"Test": "foobar"})
# Then the destination path should exist
os.path.exists(destination).should.be.true
# And the contents should be json
raw = open(destination).read()
json.loads.when.called_with(raw).should_not.throw(ValueError)
# And the contents should be expected
data = json.loads(raw)
data.should.be.a(list)
data.should.have.length_of(2)
# And the responses should have the expected keys
response = data[0]
response.should.have.key("request").being.length_of(5)
response.should.have.key("response").being.length_of(3)
response['request'].should.have.key("method").being.equal("GET")
response['request'].should.have.key("headers").being.a(dict)
response['request'].should.have.key("querystring").being.equal({
"age": [
"25"
],
"name": [
"Gabriel"
]
})
response['response'].should.have.key("status").being.equal(200)
response['response'].should.have.key("body").being.an(str)
response['response'].should.have.key("headers").being.a(dict)
# older urllib3 had a bug where header keys were lower-cased:
# https://github.com/shazow/urllib3/issues/236
# cope with that
if 'server' in response['response']["headers"]:
response['response']["headers"]["Server"] = response['response']["headers"].pop("server")
response['response']["headers"].should.have.key("Server").being.equal("TornadoServer/" + tornado_version)
# And When I playback the previously recorded calls
with HTTPretty.playback(destination):
# And make the expected requests
response1 = requests.get(server_url("/foobar?name=Gabriel&age=25", port))
response2 = requests.post(
server_url("/foobar", port),
data=json.dumps({'test': '123'}),
headers={"Test": "foobar"},
)
# Then the responses should be the expected
response1.json().should.equal({"foobar": {"age": "25", "name": "Gabriel"}})
response2.json()["foobar"].should.equal({})
response2.json()["req_body"].should.equal(json.dumps({"test": "123"}))
response2.json()["req_headers"].should.have.key("Test")
response2.json()["req_headers"]["Test"].should.equal("foobar")
@httprettified
def test_py26_callback_response():
("HTTPretty should call a callback function *once* and set its return value"
" as the body of the response requests")
def _request_callback(request, uri, headers):
return [200, headers, "The {} response from {}".format(decode_utf8(request.method), uri)]
request_callback = Mock()
request_callback.side_effect = _request_callback
HTTPretty.register_uri(
HTTPretty.POST, "https://api.yahoo.com/test_post",
body=request_callback)
requests.post(
"https://api.yahoo.com/test_post",
{"username": "gabrielfalcao"}
)
os.environ['STOP'] = 'true'
expect(request_callback.call_count).equal(1)
@httprettified
def test_httpretty_should_work_with_non_standard_ports():
"HTTPretty should work with a non-standard port number"
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"https://api.yipit.com:1234/v1/deal;brand=(?P<brand_name>\w+)"),
body=lambda method, uri, headers: [200, headers, uri]
)
HTTPretty.register_uri(
HTTPretty.POST,
"https://asdf.com:666/meow",
body=lambda method, uri, headers: [200, headers, uri]
)
response = requests.get('https://api.yipit.com:1234/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(response.text).to.equal('https://api.yipit.com:1234/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal;brand=gap?first_name=chuck&last_name=norris')
response = requests.post('https://asdf.com:666/meow')
expect(response.text).to.equal('https://asdf.com:666/meow')
expect(HTTPretty.last_request.method).to.equal('POST')
expect(HTTPretty.last_request.path).to.equal('/meow')
@httprettified
def test_httpretty_reset_by_switching_protocols_for_same_port():
"HTTPretty should reset protocol/port associations"
HTTPretty.register_uri(
HTTPretty.GET,
"http://api.yipit.com:1234/v1/deal",
body=lambda method, uri, headers: [200, headers, uri]
)
response = requests.get('http://api.yipit.com:1234/v1/deal')
expect(response.text).to.equal('http://api.yipit.com:1234/v1/deal')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal')
HTTPretty.reset()
HTTPretty.register_uri(
HTTPretty.GET,
"https://api.yipit.com:1234/v1/deal",
body=lambda method, uri, headers: [200, headers, uri]
)
response = requests.get('https://api.yipit.com:1234/v1/deal')
expect(response.text).to.equal('https://api.yipit.com:1234/v1/deal')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal')
@httprettified
def test_httpretty_should_allow_registering_regexes_with_port_and_give_a_proper_match_to_the_callback():
"HTTPretty should allow registering regexes with requests and giva a proper match to the callback"
HTTPretty.register_uri(
HTTPretty.GET,
re.compile(r"https://api.yipit.com:1234/v1/deal;brand=(?P<brand_name>\w+)"),
body=lambda method, uri, headers: [200, headers, uri]
)
response = requests.get('https://api.yipit.com:1234/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(response.text).to.equal('https://api.yipit.com:1234/v1/deal;brand=gap?first_name=chuck&last_name=norris')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('/v1/deal;brand=gap?first_name=chuck&last_name=norris')
@httprettified
def test_httpretty_should_handle_paths_starting_with_two_slashes():
"HTTPretty should handle URLs with paths starting with //"
HTTPretty.register_uri(
HTTPretty.GET, "http://example.com//foo",
body="Find the best foo"
)
response = requests.get('http://example.com//foo')
expect(response.text).to.equal('Find the best foo')
expect(HTTPretty.last_request.method).to.equal('GET')
expect(HTTPretty.last_request.path).to.equal('//foo')
| {
"content_hash": "d5a73dc766b7492b00c818864c0de9be",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 551,
"avg_line_length": 36.101621621621625,
"alnum_prop": 0.6510750434209738,
"repo_name": "gabrielfalcao/HTTPretty",
"id": "55aa109d46a64c410d77448716c185e36e4fce39",
"size": "34573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/test_requests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2159"
},
{
"name": "Python",
"bytes": "228046"
},
{
"name": "Shell",
"bytes": "2276"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('attack', models.IntegerField()),
('defence', models.IntegerField()),
('description', models.TextField()),
],
),
]
| {
"content_hash": "ccd8c02d8455615e673fdee9ee84e428",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 26.291666666666668,
"alnum_prop": 0.5451664025356577,
"repo_name": "ualikhansars/Gwent",
"id": "6a98507a9ef501bed84201d1bc28f4227aa21ebe",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/cards/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "94967"
},
{
"name": "HTML",
"bytes": "80213"
},
{
"name": "JavaScript",
"bytes": "206947"
},
{
"name": "Python",
"bytes": "6849933"
},
{
"name": "Shell",
"bytes": "3274"
}
],
"symlink_target": ""
} |
import gdal
import getopt
import json
import math
import numpy
import os
import sys
import tempfile
from gdalconst import *
from osgeo import osr
from dateutil.parser import parse
from pymrgeo import MrGeo
from pymrgeo.rastermapop import RasterMapOp
# The purpose of this script is to provide an example of how pymrgeo can
# be used for ingesting and processing imagery. It makes use of Landsat8
# source imagery which is freely available in s3://landsat-pds/L8/
#
# This script will produce a single MrGeo image with the normalized difference
# vegetation index (NDVI) computed across a set of source scenes from Landsat8.
# The user should copy the Landsat data for all of the scenes they wish to
# process into a local directory structure. It processes bands 4, 5 and BQA
# and reads the *_MTL.json files for metadata.
#
# The user specifies the root directory for the images to process along with
# the month and year for which to compute the NDVI. The script finds the latest
# timestamp for each scene within that month and year, then uses GDAL to
# pre-process the source file to compute the TOA reflectance for bands 4 (red)
# and 5 (nir) and saves the results to temp files. It then ingests all of those
# temp files as a single MrGeo image, and using pymrgeo map algebra capabilities,
# computes the NDVI and saves it as a MrGeo image.
def toa_reflectance(Array, bqa_data, metadata, band):
scales = metadata['L1_METADATA_FILE']['RADIOMETRIC_RESCALING']
mult = scales['REFLECTANCE_MULT_BAND_' + str(band)]
add = scales['REFLECTANCE_ADD_BAND_' + str(band)]
print('band: ' + str(band) + ' mult: ' + str(mult) + ' add: ' + str(add))
# Ignore pixels with a high certainty of cloud cover, ice/snow, or water.
Array[(numpy.bitwise_and(bqa_data, 0x8820) != 0)] = numpy.nan
conds = numpy.logical_not(numpy.isnan(Array))
Array[conds] *= mult
Array[conds] += add
def toa_radiance(Array, metadata, band):
scales = metadata['L1_METADATA_FILE']['RADIOMETRIC_RESCALING']
mult = scales['RADIANCE_MULT_BAND_' + str(band)]
add = scales['RADIANCE_ADD_BAND_' + str(band)]
sun_elev = metadata['L1_METADATA_FILE']['IMAGE_ATTRIBUTES']['SUN_ELEVATION'] * 0.0174533 # degrees to rad
print('band: ' + str(band) + ' mult: ' + str(mult) + ' add: ' + str(add))
Array[Array != 0] = ((Array * mult) + add) / math.sin(sun_elev)
def load_all_metadata(root):
metadata = []
# find all the landsat images
if os.path.exists(root):
for dirname, subdirs, files in os.walk(root):
for name in files:
pathname = os.path.join(dirname, name)
base, ext = os.path.splitext(pathname)
ext = ext.lower()
base = base.lower()
if ext == '.json':
with open(pathname) as metafile:
md = json.load(metafile)
md["PARENT_DIR"] = dirname
md["BQA"] = os.path.join(dirname,
md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_QUALITY"])
md[1] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_1"])
md[2] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_2"])
md[3] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_3"])
md[4] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_4"])
md[5] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_5"])
md[6] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_6"])
md[7] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_7"])
md[8] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_8"])
md[9] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_9"])
md[10] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_10"])
md[11] = os.path.join(dirname, md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["FILE_NAME_BAND_11"])
metadata.append(md)
metafile.close()
pass
return metadata
# Function to read the original file's projection:
def GetGeoInfo(FileName):
SourceDS = gdal.Open(FileName, GA_ReadOnly)
NDV = SourceDS.GetRasterBand(1).GetNoDataValue()
xsize = SourceDS.RasterXSize
ysize = SourceDS.RasterYSize
GeoT = SourceDS.GetGeoTransform()
Projection = osr.SpatialReference()
Projection.ImportFromWkt(SourceDS.GetProjectionRef())
data_type = SourceDS.GetRasterBand(1).DataType
data_type = gdal.GetDataTypeName(data_type)
return NDV, xsize, ysize, GeoT, Projection, data_type
# Function to write a new file.
def CreateGeoTiff(Name, Array, driver, NDV,
xsize, ysize, GeoT, Projection, data_type):
if data_type == 'Float32':
data_type = gdal.GDT_Float32
# Set up the dataset
DataSet = driver.Create(Name, xsize, ysize, 1, data_type)
# the '1' is for band 1.
DataSet.SetGeoTransform(GeoT)
DataSet.SetProjection(Projection.ExportToWkt())
# Write the array
DataSet.GetRasterBand(1).WriteArray(Array)
if NDV is not None:
DataSet.GetRasterBand(1).SetNoDataValue(NDV)
# DataSet.close()
def main(argv):
root = ''
month = -1
year = -1
output_postfix = ''
try:
opts, args = getopt.getopt(argv, "hr:m:y:o:", ["root=", "month=,year=,output-postfix="])
except getopt.GetoptError:
print 'multi-landsat.py -r <rootdir> -m <monthnum> -y <year> -o <output-postfix>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'multi-landsat.py -r <rootdir> -m <monthnum> -y <year> -o <output-postfix>'
sys.exit()
elif opt in ("-r", "--root"):
root = arg
elif opt in ("-m", "--month"):
month = int(arg)
elif opt in ("-y", "--year"):
year = int(arg)
elif opt in ("-o", "--output-postfix"):
output_postfix = arg
print 'Root is %s' % (root)
print 'Month is %d' % (month)
print 'Year is %d' % (year)
metadata = load_all_metadata(root)
# Filter the metadata to find the most recent imagery for each scene.
# It is assumed that the root directory only contains scenes we are
# interested in.
filtered_metadata = {}
for md in metadata:
acqdate = parse(md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["DATE_ACQUIRED"])
if acqdate.month == month and acqdate.year == year:
wrs_path = int(md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["WRS_PATH"])
wrs_row = int(md["L1_METADATA_FILE"]["PRODUCT_METADATA"]["WRS_ROW"])
key = "%03d%03d" % (wrs_path, wrs_row)
if filtered_metadata.get(key) is None:
filtered_metadata[key] = md
else:
existing_acq_date = parse(
filtered_metadata[key]["L1_METADATA_FILE"]["PRODUCT_METADATA"]["DATE_ACQUIRED"])
if acqdate > existing_acq_date:
filtered_metadata[key] = md
for key in filtered_metadata:
print "Processing scene at path %s, row %s, acquired %s, at %s" % (
filtered_metadata[key]["L1_METADATA_FILE"]["PRODUCT_METADATA"]["WRS_PATH"],
filtered_metadata[key]["L1_METADATA_FILE"]["PRODUCT_METADATA"]["WRS_ROW"],
filtered_metadata[key]["L1_METADATA_FILE"]["PRODUCT_METADATA"]["DATE_ACQUIRED"],
filtered_metadata[key]["PARENT_DIR"])
# being a separate image
mrgeo = MrGeo()
mrgeo.usedebug()
mrgeo.start()
# Let's go ahead and ingest those images into MrGeo, with each band
red_refl = ingest_reflectance_image(mrgeo, filtered_metadata, 4, "landsat-red-refl" + output_postfix)
nir_refl = ingest_reflectance_image(mrgeo, filtered_metadata, 5, "landsat-nir-refl" + output_postfix)
ndvi = (nir_refl - red_refl) / (nir_refl + red_refl)
ndvi.save('landsat-ndvi' + output_postfix)
mrgeo.stop()
print("***** Done *****")
def ingest_reflectance_image(mrgeo, filtered_metadata, band, image_name):
cnt = 0
red_refl_paths = mrgeo.gateway.new_array(mrgeo.gateway.jvm.String, len(filtered_metadata))
driver = gdal.GetDriverByName('GTiff')
for key in filtered_metadata:
md = filtered_metadata[key]
red_ds = gdal.Open(md[band], GA_ReadOnly)
bqa_ds = gdal.Open(md["BQA"], GA_ReadOnly)
ndv, xsize, ysize, GeoT, Projection, data_type = GetGeoInfo(md[band])
if ndv is None:
ndv = 0
red_band = red_ds.GetRasterBand(1)
red_data = red_band.ReadAsArray()
bqa_band = bqa_ds.GetRasterBand(1)
bqa_data = bqa_band.ReadAsArray()
data_type = gdal.GDT_Float32
red_data = numpy.float32(red_data)
# Change nodata value to NaN since we're changing the type to Float32
red_data[red_data == ndv] = RasterMapOp.nan()
ndv = RasterMapOp.nan()
toa_reflectance(red_data, bqa_data, md, band)
# Now turn the array into a GTiff.
ntf = tempfile.NamedTemporaryFile(suffix=".tif", delete=False)
red_temp_file = ntf.name
CreateGeoTiff(red_temp_file, red_data, driver, ndv,
xsize, ysize, GeoT, Projection, data_type)
red_refl_paths[cnt] = red_temp_file
cnt += 1
# close the datasets
red_ds = None
bqa_ds = None
red = mrgeo.ingest_image(red_refl_paths)
red.save(image_name)
for rf in red_refl_paths:
os.remove(rf)
return red
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "dbb14c6b45aba26f9a94ce89252146d6",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 119,
"avg_line_length": 42.0836820083682,
"alnum_prop": 0.6102604891628555,
"repo_name": "ngageoint/mrgeo",
"id": "99c5a02c75ac7678c7283a0da98523ab47e0cecb",
"size": "10082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrgeo-python/src/test/python/landsat-ndvi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "2033"
},
{
"name": "Java",
"bytes": "3075234"
},
{
"name": "Jupyter Notebook",
"bytes": "21757"
},
{
"name": "Python",
"bytes": "162426"
},
{
"name": "Scala",
"bytes": "775082"
},
{
"name": "Scheme",
"bytes": "17974"
},
{
"name": "Shell",
"bytes": "75079"
}
],
"symlink_target": ""
} |
'''
pydiatra helper functions
'''
import contextlib
class ExceptionContext(object):
def __init__(self):
self.exception = None
def __str__(self):
return str(self.exception)
def __nonzero__(self):
return self.exception is not None
__bool__ = __nonzero__
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.exception = exc_value
return True
def catch_exceptions():
return ExceptionContext()
# pylint: disable=undefined-loop-variable
@contextlib.contextmanager
def monkeypatch(obj, **kwargs):
orig = {}
for name in kwargs:
orig[name] = getattr(obj, name)
try:
for name, value in kwargs.items():
setattr(obj, name, value)
yield
finally:
for name, value in orig.items():
setattr(obj, name, value)
# pylint: enable=undefined-loop-variable
__all__ = [
'catch_exceptions',
'monkeypatch',
]
# vim:ts=4 sts=4 sw=4 et
| {
"content_hash": "951b75a7336379d248d85bf0605e2ed5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 48,
"avg_line_length": 20.1,
"alnum_prop": 0.5970149253731343,
"repo_name": "jwilk/pydiatra",
"id": "481d735171307faf9de69e5317b1a223112faaca",
"size": "2143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydiatra/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116290"
},
{
"name": "Shell",
"bytes": "9315"
}
],
"symlink_target": ""
} |
import os
from pymongo import read_preferences
class Config(object):
# Secret key for the cookies. Change in production.
SECRET_KEY = 'secret'
# Mongo DB settings.
MONGODB_SETTINGS = {
'db': 'next_frontend',
'host': os.environ.get('MONGODB_PORT_27017_TCP_ADDR','localhost'),
'port': int(os.environ.get('MONGODB_PORT_27017_TCP_PORT',27017)),
'read_preference': read_preferences.ReadPreference.PRIMARY
}
# MongoDB info
MONGODB_HOST = os.environ.get('MONGODB_PORT_27017_TCP_ADDR','localhost')
MONGODB_PORT = int(os.environ.get('MONGODB_PORT_27017_TCP_PORT',27017))
MONGODB_FRONTEND_DB_NAME = 'next_frontend'
# AWS S3 info
AWS_ID = os.environ.get('AWS_ACCESS_ID', None)
AWS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', None)
AWS_BUCKET_NAME = os.environ.get('AWS_BUCKET_NAME','next.discovery')
# Local docker frontend base links (This is not set, therefor always is localhost, which is fine)
NEXT_BACKEND_HOST = os.environ.get("NEXT_BACKEND_GLOBAL_HOST", 'localhost')
NEXT_BACKEND_PORT = os.environ.get("NEXT_BACKEND_GLOBAL_PORT", "8000")
# Global frontend base links. Used for widgets and stats calls.
NEXT_BACKEND_GLOBAL_HOST = os.environ.get("NEXT_BACKEND_GLOBAL_HOST", 'localhost')
NEXT_BACKEND_GLOBAL_PORT = os.environ.get("NEXT_BACKEND_GLOBAL_PORT", "8000")
# Global frontend links. Used for widgets and stats calls.
NEXT_FRONTEND_GLOBAL_HOST = os.environ.get("NEXT_FRONTEND_GLOBAL_HOST", 'localhost')
NEXT_FRONTEND_GLOBAL_PORT = os.environ.get("NEXT_FRONTEND_GLOBAL_PORT", "80")
# Site ID and Key for Next Frontend Base.
SITE_ID = '99eb2f19d5a303acc8fa1a6e9e05cd'
SITE_KEY = '7c48d1b377889d9ec0322e1dcf7f39'
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite:///../next_baseDB.db'
CACHE_TYPE = 'simple'
class DevConfig(Config):
# Config variables for the debug panel
DEBUG = True
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_PANELS = ['flask.ext.mongoengine.panels.MongoDebugPanel']
CACHE_TYPE = 'null'
# This allows us to test the forms from WTForm
WTF_CSRF_ENABLED = False
| {
"content_hash": "e3464253a79bb834ea250afc5f60b94b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 101,
"avg_line_length": 37.724137931034484,
"alnum_prop": 0.6860146252285192,
"repo_name": "lalitkumarj/NEXT-psych",
"id": "d162288e92929ef07c476c6ced2efb5bd8f3d8f6",
"size": "2188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/base/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "197644"
},
{
"name": "HTML",
"bytes": "358541"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "2714780"
},
{
"name": "Makefile",
"bytes": "2880"
},
{
"name": "Perl",
"bytes": "5546"
},
{
"name": "Python",
"bytes": "782418"
},
{
"name": "Shell",
"bytes": "7340"
}
],
"symlink_target": ""
} |
import itertools
import uuid
try:
import asyncio
except ImportError:
import trollius as asyncio
import daiquiri
from oslo_config import cfg
import six
from gnocchi import incoming
from gnocchi import indexer
from gnocchi import service
from gnocchi import storage
from gnocchi import utils
LOG = daiquiri.getLogger(__name__)
class Stats(object):
def __init__(self, conf):
self.conf = conf
self.incoming = incoming.get_driver(self.conf)
self.indexer = indexer.get_driver(self.conf)
try:
self.indexer.create_resource('generic',
self.conf.statsd.resource_id,
self.conf.statsd.creator)
except indexer.ResourceAlreadyExists:
LOG.debug("Resource %s already exists",
self.conf.statsd.resource_id)
else:
LOG.info("Created resource %s", self.conf.statsd.resource_id)
self.gauges = {}
self.counters = {}
self.times = {}
def reset(self):
self.gauges.clear()
self.counters.clear()
self.times.clear()
def treat_metric(self, metric_name, metric_type, value, sampling):
metric_name += "|" + metric_type
if metric_type == "ms":
if sampling is not None:
raise ValueError(
"Invalid sampling for ms: `%d`, should be none"
% sampling)
self.times[metric_name] = storage.Measure(
utils.dt_in_unix_ns(utils.utcnow()), value)
elif metric_type == "g":
if sampling is not None:
raise ValueError(
"Invalid sampling for g: `%d`, should be none"
% sampling)
self.gauges[metric_name] = storage.Measure(
utils.dt_in_unix_ns(utils.utcnow()), value)
elif metric_type == "c":
sampling = 1 if sampling is None else sampling
if metric_name in self.counters:
current_value = self.counters[metric_name].value
else:
current_value = 0
self.counters[metric_name] = storage.Measure(
utils.dt_in_unix_ns(utils.utcnow()),
current_value + (value * (1 / sampling)))
# TODO(jd) Support "set" type
# elif metric_type == "s":
# pass
else:
raise ValueError("Unknown metric type `%s'" % metric_type)
def flush(self):
resource = self.indexer.get_resource('generic',
self.conf.statsd.resource_id,
with_metrics=True)
for metric_name, measure in itertools.chain(
six.iteritems(self.gauges),
six.iteritems(self.counters),
six.iteritems(self.times)):
try:
# NOTE(jd) We avoid considering any concurrency here as statsd
# is not designed to run in parallel and we do not envision
# operators manipulating the resource/metrics using the Gnocchi
# API at the same time.
metric = resource.get_metric(metric_name)
if not metric:
ap_name = self._get_archive_policy_name(metric_name)
metric = self.indexer.create_metric(
uuid.uuid4(),
self.conf.statsd.creator,
archive_policy_name=ap_name,
name=metric_name,
resource_id=self.conf.statsd.resource_id)
self.incoming.add_measures(metric, (measure,))
except Exception as e:
LOG.error("Unable to add measure %s: %s",
metric_name, e)
self.reset()
def _get_archive_policy_name(self, metric_name):
if self.conf.statsd.archive_policy_name:
return self.conf.statsd.archive_policy_name
# NOTE(sileht): We didn't catch NoArchivePolicyRuleMatch to log it
ap = self.indexer.get_archive_policy_for_metric(metric_name)
return ap.name
class StatsdServer(object):
def __init__(self, stats):
self.stats = stats
@staticmethod
def connection_made(transport):
pass
def datagram_received(self, data, addr):
LOG.debug("Received data `%r' from %s", data, addr)
try:
messages = [m for m in data.decode().split("\n") if m]
except Exception as e:
LOG.error("Unable to decode datagram: %s", e)
return
for message in messages:
metric = message.split("|")
if len(metric) == 2:
metric_name, metric_type = metric
sampling = None
elif len(metric) == 3:
metric_name, metric_type, sampling = metric
else:
LOG.error("Invalid number of | in `%s'", message)
continue
sampling = float(sampling[1:]) if sampling is not None else None
metric_name, metric_str_val = metric_name.split(':')
# NOTE(jd): We do not support +/- gauge, and we delete gauge on
# each flush.
value = float(metric_str_val)
try:
self.stats.treat_metric(metric_name, metric_type,
value, sampling)
except Exception as e:
LOG.error("Unable to treat metric %s: %s", message, str(e))
def start():
conf = service.prepare_service()
if conf.statsd.resource_id is None:
raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd"))
stats = Stats(conf)
loop = asyncio.get_event_loop()
# TODO(jd) Add TCP support
listen = loop.create_datagram_endpoint(
lambda: StatsdServer(stats),
local_addr=(conf.statsd.host, conf.statsd.port))
def _flush():
loop.call_later(conf.statsd.flush_delay, _flush)
stats.flush()
loop.call_later(conf.statsd.flush_delay, _flush)
transport, protocol = loop.run_until_complete(listen)
LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port)
LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
| {
"content_hash": "58a3de18c7ec78322127ac211dac297a",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 79,
"avg_line_length": 35.6353591160221,
"alnum_prop": 0.5485271317829458,
"repo_name": "leandroreox/gnocchi",
"id": "3db68a14f383bc1ccb2a34c2e9a9473454c70bd6",
"size": "7027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchi/statsd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1064"
},
{
"name": "Python",
"bytes": "807668"
},
{
"name": "Shell",
"bytes": "24197"
}
],
"symlink_target": ""
} |
from designate import objects
from designate.objects.adapters.api_v2 import base
class TsigKeyAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.TsigKey
MODIFICATIONS = {
'fields': {
"id": {},
"name": {
'read_only': False
},
"algorithm": {
'read_only': False
},
"secret": {
'read_only': False
},
"scope": {
'read_only': False
},
"resource_id": {
'read_only': False
},
"created_at": {},
"updated_at": {},
},
'options': {
'links': True,
'resource_name': 'tsigkey',
'collection_name': 'tsigkeys',
}
}
class TsigKeyListAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.TsigKeyList
MODIFICATIONS = {
'options': {
'links': True,
'resource_name': 'tsigkey',
'collection_name': 'tsigkeys',
}
}
| {
"content_hash": "a24f93f104e1417243ec6098632c42d6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 50,
"avg_line_length": 24.65909090909091,
"alnum_prop": 0.43870967741935485,
"repo_name": "openstack/designate",
"id": "ff9535ebcc6fe53d4105c60507d74d3488d99a6b",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/objects/adapters/api_v2/tsigkey.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "71074"
},
{
"name": "Jinja",
"bytes": "2004"
},
{
"name": "Mako",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "2442862"
},
{
"name": "Shell",
"bytes": "46200"
}
],
"symlink_target": ""
} |
import attr
from . import gentoo
from .utils import remove_spaces
class InvalidVersion(ValueError):
pass
def is_valid_alpine_version(s):
"""
Return True is the string `s` is a valid Alpine version.
We do not support yet version strings that start with
non-significant zeros.
For example:
>>> is_valid_alpine_version("006")
False
>>> is_valid_alpine_version("1.2.3")
True
>>> is_valid_alpine_version("02-r1")
False
"""
left, _, _ = s.partition(".")
# hanlde the suffix case
left, _, _ = left.partition("-")
if not left.isdigit():
return True
i = int(left)
return str(i) == left
@attr.s(frozen=True, order=False, hash=True)
class Version:
"""
Base version mixin to subclass for each version syntax implementation.
Each version subclass is:
- immutable and hashable
- comparable and orderable e.g., such as implementing all rich comparison
operators or implementing functools.total_ordering. The default is to
compare the value as-is.
"""
# the original string used to build this Version
string = attr.ib(type=str)
# the normalized string for this Version, stored without spaces and
# lowercased. Any leading v is removed too.
normalized_string = attr.ib(type=str, default=None, repr=False)
# a comparable scheme-specific version object constructed from
# the version string
value = attr.ib(default=None, repr=False)
def __attrs_post_init__(self):
normalized_string = self.normalize(self.string)
if not self.is_valid(normalized_string):
raise InvalidVersion(f"{self.string!r} is not a valid {self.__class__!r}")
# Set the normalized string as default value
# Notes: setattr is used because this is an immutable frozen instance.
# See https://www.attrs.org/en/stable/init.html?#post-init
object.__setattr__(self, "normalized_string", normalized_string)
value = self.build_value(normalized_string)
object.__setattr__(self, "value", value)
@classmethod
def is_valid(cls, string):
"""
Return True if the ``string`` is a valid version for its scheme or False
if not valid. The empty string, None, False and 0 are considered invalid.
Subclasses should implement this.
"""
return bool(string)
@classmethod
def normalize(cls, string):
"""
Return a normalized version string from ``string ``. Subclass can override.
"""
# FIXME: Is removing spaces and strip v the right thing to do?
return remove_spaces(string).rstrip("v ").strip()
@classmethod
def build_value(cls, string):
"""
Return a wrapped version "value" object for a version ``string``.
Subclasses can override. The default is a no-op and returns the string
as-is, and is called by default at init time with the computed
normalized_string.
"""
return string
def satisfies(self, constraint):
"""
Return True is this Version satisfies the ``constraint``
VersionConstraint. Satisfying means that this version is "within" the
``constraint``.
"""
return self in constraint
def __str__(self):
return str(self.value)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__eq__(other.value)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__lt__(other.value)
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__gt__(other.value)
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__le__(other.value)
def __ge__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__ge__(other.value)
@attr.s(frozen=True, order=False, eq=False, hash=True)
class AlpineLinuxVersion(Version):
"""Alpine linux version"""
@classmethod
def is_valid(cls, string):
return is_valid_alpine_version(string) and gentoo.is_valid(string)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return gentoo.vercmp(self.value, other.value) == 0
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return gentoo.vercmp(self.value, other.value) < 0
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return gentoo.vercmp(self.value, other.value) > 0
| {
"content_hash": "98eaa9283ee401fbf3d57f48e6f31de1",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 80,
"avg_line_length": 29.640522875816995,
"alnum_prop": 0.6760749724366042,
"repo_name": "google/osv.dev",
"id": "fb69334ffbef077797762aa6e08d640b29ef7b20",
"size": "4709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osv/third_party/univers/alpine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "10995"
},
{
"name": "Go",
"bytes": "133088"
},
{
"name": "HTML",
"bytes": "30598"
},
{
"name": "JavaScript",
"bytes": "5795"
},
{
"name": "Makefile",
"bytes": "1325"
},
{
"name": "Python",
"bytes": "359843"
},
{
"name": "SCSS",
"bytes": "20439"
},
{
"name": "Shell",
"bytes": "17923"
}
],
"symlink_target": ""
} |
"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing
for shapes.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import solveQuadratic, solveCubic
__all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
"""This pen implements "point inside" testing: to test whether
a given point lies inside the shape (black) or outside (white).
Instances of this class can be recycled, as long as the
setTestPoint() method is used to set the new point to test.
Typical usage:
pen = PointInsidePen(glyphSet, (100, 200))
outline.draw(pen)
isInside = pen.getResult()
Both the even-odd algorithm and the non-zero-winding-rule
algorithm are implemented. The latter is the default, specify
True for the evenOdd argument of __init__ or setTestPoint
to use the even-odd algorithm.
"""
# This class implements the classical "shoot a ray from the test point
# to infinity and count how many times it intersects the outline" (as well
# as the non-zero variant, where the counter is incremented if the outline
# intersects the ray in one direction and decremented if it intersects in
# the other direction).
# I found an amazingly clear explanation of the subtleties involved in
# implementing this correctly for polygons here:
# http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
# I extended the principles outlined on that page to curves.
def __init__(self, glyphSet, testPoint, evenOdd=False):
BasePen.__init__(self, glyphSet)
self.setTestPoint(testPoint, evenOdd)
def setTestPoint(self, testPoint, evenOdd=False):
"""Set the point to test. Call this _before_ the outline gets drawn."""
self.testPoint = testPoint
self.evenOdd = evenOdd
self.firstPoint = None
self.intersectionCount = 0
def getWinding(self):
if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works
# for closed paths.
self.closePath()
return self.intersectionCount
def getResult(self):
"""After the shape has been drawn, getResult() returns True if the test
point lies within the (black) shape, and False if it doesn't.
"""
winding = self.getWinding()
if self.evenOdd:
result = winding % 2
else: # non-zero
result = self.intersectionCount != 0
return not not result
def _addIntersection(self, goingUp):
if self.evenOdd or goingUp:
self.intersectionCount += 1
else:
self.intersectionCount -= 1
def _moveTo(self, point):
if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works
# for closed paths.
self.closePath()
self.firstPoint = point
def _lineTo(self, point):
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = point
if x1 < x and x2 < x:
return
if y1 < y and y2 < y:
return
if y1 >= y and y2 >= y:
return
dx = x2 - x1
dy = y2 - y1
t = (y - y1) / dy
ix = dx * t + x1
if ix < x:
return
self._addIntersection(y2 > y1)
def _curveToOne(self, bcp1, bcp2, point):
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = bcp1
x3, y3 = bcp2
x4, y4 = point
if x1 < x and x2 < x and x3 < x and x4 < x:
return
if y1 < y and y2 < y and y3 < y and y4 < y:
return
if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
return
dy = y1
cy = (y2 - dy) * 3.0
by = (y3 - y2) * 3.0 - cy
ay = y4 - dy - cy - by
solutions = sorted(solveCubic(ay, by, cy, dy - y))
solutions = [t for t in solutions if -0. <= t <= 1.]
if not solutions:
return
dx = x1
cx = (x2 - dx) * 3.0
bx = (x3 - x2) * 3.0 - cx
ax = x4 - dx - cx - bx
above = y1 >= y
lastT = None
for t in solutions:
if t == lastT:
continue
lastT = t
t2 = t * t
t3 = t2 * t
direction = 3*ay*t2 + 2*by*t + cy
incomingGoingUp = outgoingGoingUp = direction > 0.0
if direction == 0.0:
direction = 6*ay*t + 2*by
outgoingGoingUp = direction > 0.0
incomingGoingUp = not outgoingGoingUp
if direction == 0.0:
direction = ay
incomingGoingUp = outgoingGoingUp = direction > 0.0
xt = ax*t3 + bx*t2 + cx*t + dx
if xt < x:
continue
if t in (0.0, -0.0):
if not outgoingGoingUp:
self._addIntersection(outgoingGoingUp)
elif t == 1.0:
if incomingGoingUp:
self._addIntersection(incomingGoingUp)
else:
if incomingGoingUp == outgoingGoingUp:
self._addIntersection(outgoingGoingUp)
#else:
# we're not really intersecting, merely touching
def _qCurveToOne_unfinished(self, bcp, point):
# XXX need to finish this, for now doing it through a cubic
# (BasePen implements _qCurveTo in terms of a cubic) will
# have to do.
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = bcp
x3, y3 = point
c = y1
b = (y2 - c) * 2.0
a = y3 - c - b
solutions = sorted(solveQuadratic(a, b, c - y))
solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
if not solutions:
return
# XXX
def _closePath(self):
if self._getCurrentPoint() != self.firstPoint:
self.lineTo(self.firstPoint)
self.firstPoint = None
def _endPath(self):
"""Insideness is not defined for open contours."""
raise NotImplementedError
| {
"content_hash": "2ef1f14d1d13abaf825a062f6231ba5c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 83,
"avg_line_length": 27.828125,
"alnum_prop": 0.6704098820887142,
"repo_name": "Pal3love/otRebuilder",
"id": "3311841bd1130e5706caaa801b13951a5a4ff353",
"size": "5343",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Package/otRebuilder/Dep/fontTools/pens/pointInsidePen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2756220"
}
],
"symlink_target": ""
} |
import os
from hashlib import md5
import requests
class RClient(object):
def __init__(self, username, password, soft_id, soft_key):
self.base_params = {
'username': username,
'password': md5(password.encode('utf-8')).hexdigest(),
'softid': soft_id,
'softkey': soft_key,
}
self.headers = {
'Connection': 'Keep-Alive',
'Expect': '100-continue',
'User-Agent': 'ben',
}
def rk_create(self, im, im_type, timeout=60):
params = {
'typeid': im_type,
'timeout': timeout,
}
params.update(self.base_params)
files = {'image': ('a.jpg', im)}
r = requests.post('http://api.ruokuai.com/create.json', data=params, files=files, headers=self.headers)
return r.json()
def rk_report_error(self, im_id):
params = {
'id': im_id,
}
params.update(self.base_params)
r = requests.post('http://api.ruokuai.com/reporterror.json', data=params, headers=self.headers)
return r.json()
def __identify_image_callback(img, code):
try:
username = os.environ['rk_username']
password = os.environ['rk_password']
id_ = os.environ['rk_id']
key = os.environ['rk_key']
rc = RClient(username, password, id_, key)
result = rc.rk_create(img, code)
print('验证码:', result['Result'])
return result['Result']
except Exception:
raise Exception('识别验证码错误')
def identify_image_callback_ruokuai_sogou(img):
return __identify_image_callback(img, 3060)
def identify_image_callback_ruokuai_weixin(img):
return __identify_image_callback(img, 3040)
| {
"content_hash": "3d9f5674c386210a9f5670e9827ac0d4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 111,
"avg_line_length": 29.491525423728813,
"alnum_prop": 0.5678160919540229,
"repo_name": "Chyroc/WechatSogou",
"id": "740e410acccb6b92979894ced28eba8424e5d02a",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/rk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "115086"
}
],
"symlink_target": ""
} |
import asyncio
import discord
from discord.ext import commands
from cogs.utils import checks
from cogs.utils.storage import RedisDict
class TemporaryVoice:
"""A cog to create TeamSpeak-like voice channels."""
def __init__(self, liara):
self.liara = liara
self.config = RedisDict('pandentia.tempvoice', liara.redis)
self.config_default = {'channel': None, 'limit': 0}
self.tracked_channels = set()
def __unload(self):
self.config.close()
def filter(self, channels):
_channels = []
for channel in channels:
if channel.name.startswith('Temp: ') or channel.id in self.tracked_channels:
_channels.append(channel)
return _channels
async def create_channel(self, member: discord.Member):
guild = member.guild
overwrites = {
guild.default_role: discord.PermissionOverwrite(connect=False),
member: discord.PermissionOverwrite(connect=True, manage_channels=True, manage_roles=True)
}
channel = await guild.create_voice_channel(('Temp: {}\'s Channel'.format(member.name))[0:32],
overwrites=overwrites)
self.tracked_channels.add(channel.id)
await member.move_to(channel)
async def on_voice_state_update(self, member, *_):
guild = member.guild
if guild is None:
return # /shrug
if self.config.get(guild.id) is None:
return
# lobby processing
channel = self.liara.get_channel(self.config[guild.id]['channel'])
if channel is None:
return
for member in channel.members:
try:
await self.create_channel(member)
except discord.Forbidden:
pass
# empty channel cleanup
await asyncio.sleep(1) # wait for the dust to settle
channels = self.filter(guild.voice_channels)
for channel in channels:
if len(channel.members) == 0:
try:
await channel.delete()
self.tracked_channels.remove(channel.id)
except discord.NotFound or KeyError:
pass
async def on_channel_update(self, before, after):
if before.id not in self.tracked_channels:
return
if before.name != after.name:
await after.edit(name=before.name)
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def create_lobby(self, ctx):
"""Creates a temporary voice lobby."""
config = self.config.get(ctx.guild.id, self.config_default)
if config['channel'] is not None:
channel = self.liara.get_channel(config['channel'])
if channel is not None:
await ctx.send('You need to remove the original lobby before creating another one.')
return
try:
channel = await ctx.guild.create_voice_channel('Lobby', overwrites={
ctx.guild.default_role: discord.PermissionOverwrite(speak=False)})
if self.config.get(ctx.guild.id) is None:
config['channel'] = channel.id
self.config[ctx.guild.id] = config
else:
self.config[ctx.guild.id]['channel'] = channel.id
self.config.commit(ctx.guild.id)
await ctx.send('Channel created! You can rename it to whatever you want now.')
except discord.Forbidden:
await ctx.send('It would appear that I don\'t have permissions to create channels.')
def setup(liara):
liara.add_cog(TemporaryVoice(liara))
| {
"content_hash": "8d4d3462877f2c89a68ca116cef27ea3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 102,
"avg_line_length": 38.36082474226804,
"alnum_prop": 0.5958075786079011,
"repo_name": "Pandentia/Liara-Cogs",
"id": "fc0f80b7693af0ccec93f32801a503c35c4da057",
"size": "3721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/tempvoice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11128"
}
],
"symlink_target": ""
} |
import grpc
from google.cloud.container_v1beta1.proto import (
cluster_service_pb2 as google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ClusterManagerStub(object):
"""Google Kubernetes Engine Cluster Manager v1beta1
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListClusters = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/ListClusters",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.FromString,
)
self.GetCluster = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/GetCluster",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Cluster.FromString,
)
self.CreateCluster = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/CreateCluster",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateCluster = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/UpdateCluster",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateNodePool = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/UpdateNodePool",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolAutoscaling = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLoggingService = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetLoggingService",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMonitoringService = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetMonitoringService",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetAddonsConfig = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetAddonsConfig",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLocations = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetLocations",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateMaster = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/UpdateMaster",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMasterAuth = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetMasterAuth",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteCluster = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/DeleteCluster",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/ListOperations",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.FromString,
)
self.GetOperation = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/GetOperation",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CancelOperation = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/CancelOperation",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetServerConfig = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/GetServerConfig",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ServerConfig.FromString,
)
self.ListNodePools = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/ListNodePools",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.FromString,
)
self.GetNodePool = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/GetNodePool",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.NodePool.FromString,
)
self.CreateNodePool = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/CreateNodePool",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteNodePool = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/DeleteNodePool",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.RollbackNodePoolUpgrade = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolManagement = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetNodePoolManagement",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLabels = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetLabels",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLegacyAbac = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetLegacyAbac",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.StartIPRotation = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/StartIPRotation",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CompleteIPRotation = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/CompleteIPRotation",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolSize = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetNodePoolSize",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNetworkPolicy = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetNetworkPolicy",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMaintenancePolicy = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/SetMaintenancePolicy",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.ListUsableSubnetworks = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/ListUsableSubnetworks",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksResponse.FromString,
)
self.ListLocations = channel.unary_unary(
"/google.container.v1beta1.ClusterManager/ListLocations",
request_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListLocationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListLocationsResponse.FromString,
)
class ClusterManagerServicer(object):
"""Google Kubernetes Engine Cluster Manager v1beta1
"""
def ListClusters(self, request, context):
"""Lists all clusters owned by a project in either the specified zone or all
zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetCluster(self, request, context):
"""Gets the details for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateCluster(self, request, context):
"""Creates a cluster, consisting of the specified number and type of Google
Compute Engine instances.
By default, the cluster is created in the project's
[default network](/compute/docs/networks-and-firewalls#networks).
One firewall is added for the cluster. After cluster creation,
the cluster creates routes for each node to allow the containers
on that node to communicate with all other instances in the
cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range is being used by the cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateCluster(self, request, context):
"""Updates the settings for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateNodePool(self, request, context):
"""Updates the version and/or image type of a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolAutoscaling(self, request, context):
"""Sets the autoscaling settings of a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLoggingService(self, request, context):
"""Sets the logging service for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMonitoringService(self, request, context):
"""Sets the monitoring service for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetAddonsConfig(self, request, context):
"""Sets the addons for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLocations(self, request, context):
"""Sets the locations for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateMaster(self, request, context):
"""Updates the master for a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMasterAuth(self, request, context):
"""Used to set master auth materials. Currently supports :-
Changing the admin password for a specific cluster.
This can be either via password generation or explicitly set.
Modify basic_auth.csv and reset the K8S API server.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteCluster(self, request, context):
"""Deletes the cluster, including the Kubernetes endpoint and all worker
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster
(e.g. load balancer resources) will not be deleted if they weren't present
at the initial create time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListOperations(self, request, context):
"""Lists all operations in a project in a specific zone or all zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetOperation(self, request, context):
"""Gets the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelOperation(self, request, context):
"""Cancels the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetServerConfig(self, request, context):
"""Returns configuration info about the Kubernetes Engine service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListNodePools(self, request, context):
"""Lists the node pools for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetNodePool(self, request, context):
"""Retrieves the node pool requested.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateNodePool(self, request, context):
"""Creates a node pool for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteNodePool(self, request, context):
"""Deletes a node pool from a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RollbackNodePoolUpgrade(self, request, context):
"""Roll back the previously Aborted or Failed NodePool upgrade.
This will be an no-op if the last upgrade successfully completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolManagement(self, request, context):
"""Sets the NodeManagement options for a node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLabels(self, request, context):
"""Sets labels on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetLegacyAbac(self, request, context):
"""Enables or disables the ABAC authorization mechanism on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def StartIPRotation(self, request, context):
"""Start master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CompleteIPRotation(self, request, context):
"""Completes master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNodePoolSize(self, request, context):
"""Sets the size for a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetNetworkPolicy(self, request, context):
"""Enables/Disables Network Policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetMaintenancePolicy(self, request, context):
"""Sets the maintenance policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListUsableSubnetworks(self, request, context):
"""Lists subnetworks that are usable for creating clusters in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListLocations(self, request, context):
"""Used to fetch locations that offer GKE.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_ClusterManagerServicer_to_server(servicer, server):
rpc_method_handlers = {
"ListClusters": grpc.unary_unary_rpc_method_handler(
servicer.ListClusters,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.SerializeToString,
),
"GetCluster": grpc.unary_unary_rpc_method_handler(
servicer.GetCluster,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Cluster.SerializeToString,
),
"CreateCluster": grpc.unary_unary_rpc_method_handler(
servicer.CreateCluster,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateCluster": grpc.unary_unary_rpc_method_handler(
servicer.UpdateCluster,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateNodePool": grpc.unary_unary_rpc_method_handler(
servicer.UpdateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolAutoscaling": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolAutoscaling,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLoggingService": grpc.unary_unary_rpc_method_handler(
servicer.SetLoggingService,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMonitoringService": grpc.unary_unary_rpc_method_handler(
servicer.SetMonitoringService,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetAddonsConfig": grpc.unary_unary_rpc_method_handler(
servicer.SetAddonsConfig,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLocations": grpc.unary_unary_rpc_method_handler(
servicer.SetLocations,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"UpdateMaster": grpc.unary_unary_rpc_method_handler(
servicer.UpdateMaster,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMasterAuth": grpc.unary_unary_rpc_method_handler(
servicer.SetMasterAuth,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"DeleteCluster": grpc.unary_unary_rpc_method_handler(
servicer.DeleteCluster,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"ListOperations": grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.SerializeToString,
),
"GetOperation": grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"CancelOperation": grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetServerConfig": grpc.unary_unary_rpc_method_handler(
servicer.GetServerConfig,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ServerConfig.SerializeToString,
),
"ListNodePools": grpc.unary_unary_rpc_method_handler(
servicer.ListNodePools,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.SerializeToString,
),
"GetNodePool": grpc.unary_unary_rpc_method_handler(
servicer.GetNodePool,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.NodePool.SerializeToString,
),
"CreateNodePool": grpc.unary_unary_rpc_method_handler(
servicer.CreateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"DeleteNodePool": grpc.unary_unary_rpc_method_handler(
servicer.DeleteNodePool,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"RollbackNodePoolUpgrade": grpc.unary_unary_rpc_method_handler(
servicer.RollbackNodePoolUpgrade,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolManagement": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolManagement,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLabels": grpc.unary_unary_rpc_method_handler(
servicer.SetLabels,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetLegacyAbac": grpc.unary_unary_rpc_method_handler(
servicer.SetLegacyAbac,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"StartIPRotation": grpc.unary_unary_rpc_method_handler(
servicer.StartIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"CompleteIPRotation": grpc.unary_unary_rpc_method_handler(
servicer.CompleteIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNodePoolSize": grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolSize,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetNetworkPolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetNetworkPolicy,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"SetMaintenancePolicy": grpc.unary_unary_rpc_method_handler(
servicer.SetMaintenancePolicy,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
"ListUsableSubnetworks": grpc.unary_unary_rpc_method_handler(
servicer.ListUsableSubnetworks,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListUsableSubnetworksResponse.SerializeToString,
),
"ListLocations": grpc.unary_unary_rpc_method_handler(
servicer.ListLocations,
request_deserializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListLocationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1beta1_dot_proto_dot_cluster__service__pb2.ListLocationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.container.v1beta1.ClusterManager", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| {
"content_hash": "1418b845076dc91e670517010aefdd8f",
"timestamp": "",
"source": "github",
"line_count": 601,
"max_line_length": 156,
"avg_line_length": 61.673876871880196,
"alnum_prop": 0.717369017428371,
"repo_name": "tseaver/google-cloud-python",
"id": "f722b4a8133853ddcde5fa36ed0487ffd06f2773",
"size": "37136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "container/google/cloud/container_v1beta1/proto/cluster_service_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import os
import redis
import urlparse
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.utils import redirect
from jinja2 import Environment, FileSystemLoader
class Shortly(object):
def __init__(self, config):
self.redis = redis.Redis(config['redis_host'], config['redis_port'])
def dispatch_request(self, request):
return Response('Hello World')
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.dispatch_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def create_app(redis_host='localhost', redis_port=6379, with_static=True):
app = Shortly({
'redis_host': redis_host,
'redis_port': redis_port
})
if with_static:
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/static': os.path.join(os.path.dirname(__file__), 'static')
})
return app
if __name__ == '__main__':
from werkzeug.serving import run_simple
app = create_app()
run_simple('127.0.0.1', 5000, app, use_debugger=True, use_reloader=True)
| {
"content_hash": "5da8a1a24993f306e1c9ac935097a61f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 29.91111111111111,
"alnum_prop": 0.6760772659732541,
"repo_name": "hugoxia/Python",
"id": "06737ca6c622cac8c79bb9b27200467012b1e1cf",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "werkzeug-demo/shortly/shortly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394"
},
{
"name": "HTML",
"bytes": "4511"
},
{
"name": "JavaScript",
"bytes": "1426"
},
{
"name": "Python",
"bytes": "91850"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
import logging
import gen
from gen.build_deploy.bash import onprem_source
# TODO(cmaloney): Should be able to pass an exact tree to gen so that we can test
# one little piece at a time rather than having to rework this every time that
# DC/OS parameters change.
def test_error_during_calc(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_ID', 'foobar')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
assert gen.validate({
'ip_detect_filename': 'not-a-existing-file',
'bootstrap_variant': '',
}, extra_sources=[onprem_source]) == {
'status': 'errors',
'errors': {
'ip_detect_contents': {'message': 'ip-detect script `not-a-existing-file` must exist'}
},
'unset': {
'bootstrap_url',
'cluster_name',
'exhibitor_storage_backend',
'master_discovery'
}
}
def test_error_during_validate(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_ID', 'foobar')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
assert gen.validate({
'bootstrap_url': '',
'bootstrap_variant': '',
'ip_detect_contents': '', # so that ip_detect_filename doesn't get used from onprem_source
'ip6_detect_contents': '',
'exhibitor_storage_backend': 'static',
'master_discovery': 'static',
'cluster_name': 'foobar',
'master_list': '["127.0.0.1"]',
}, extra_sources=[onprem_source]) == {
'status': 'errors',
'errors': {
'bootstrap_url': {'message': 'Should be a url (http://example.com/bar or file:///path/to/local/cache)'},
},
'unset': set()
}
def test_error_during_validate_calico_network(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_ID', 'foobar')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
assert gen.validate({
'bootstrap_url': '',
'bootstrap_variant': '',
'ip_detect_contents': '', # so that ip_detect_filename doesn't get used from onprem_source
'ip6_detect_contents': '',
'exhibitor_storage_backend': 'static',
'master_discovery': 'static',
'cluster_name': 'foobar',
'master_list': '["127.0.0.1"]',
'calico_network_cidr': '',
}, extra_sources=[onprem_source]) == {
'status': 'errors',
'errors': {
'calico_network_cidr': {
'message': 'Incorrect value for `calico_network_cidr`: ``. Only IPv4 subnets are allowed'
},
'bootstrap_url': {
'message': 'Should be a url (http://example.com/bar or file:///path/to/local/cache)'
}
},
'unset': set()
}
| {
"content_hash": "d9a37b8a0ef138d0dca5bc8920445d1c",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 116,
"avg_line_length": 34.93589743589744,
"alnum_prop": 0.5761467889908257,
"repo_name": "dcos/dcos",
"id": "e15b9fb72f3dfe11b8db17e7b139e94018277c37",
"size": "2725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen/tests/test_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "9395"
},
{
"name": "Go",
"bytes": "5104"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HCL",
"bytes": "14047"
},
{
"name": "HTML",
"bytes": "91122"
},
{
"name": "Lua",
"bytes": "200521"
},
{
"name": "Makefile",
"bytes": "8767"
},
{
"name": "PowerShell",
"bytes": "230"
},
{
"name": "Python",
"bytes": "1625906"
},
{
"name": "Shell",
"bytes": "102887"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Upload",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"guid",
models.UUIDField(
default=uuid.uuid4, unique=True, verbose_name="GUID"
),
),
("state", django_fsm.FSMField(default="initial", max_length=50)),
("upload_offset", models.BigIntegerField(default=0)),
("upload_length", models.BigIntegerField(default=-1)),
("upload_metadata", models.TextField(blank=True)),
("filename", models.CharField(blank=True, max_length=255)),
("temporary_file_path", models.CharField(max_length=4096, null=True)),
("expires", models.DateTimeField(blank=True, null=True)),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
]
| {
"content_hash": "1429673b9f0697eeb3800011e08c2856",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.4483695652173913,
"repo_name": "fin/froide",
"id": "ff62019a28a1385d15f19abca9900d02fff8d467",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/upload/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
} |
from twisted.web.resource import Resource
class WSGIRootResource(Resource):
def __init__(self, wsgiResource, children):
"""
Creates a Twisted Web root resource.
"""
Resource.__init__(self)
self._wsgiResource = wsgiResource
self.children = children
def getChild(self, path, request):
request.prepath.pop()
request.postpath.insert(0, path)
return self._wsgiResource
| {
"content_hash": "e5308131a8c528c4fd24511fc288ec4d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 47,
"avg_line_length": 29.733333333333334,
"alnum_prop": 0.6300448430493274,
"repo_name": "cravler/flask-twisted",
"id": "e7b46a74e85ddc9c3e7f0b8aff9477a878b20243",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_twisted/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4703"
}
],
"symlink_target": ""
} |
from typing import List
import databases
import sqlalchemy
from fastapi import FastAPI
from pydantic import BaseModel
# SQLAlchemy specific code, as with any other app
DATABASE_URL = "sqlite:///./test.db"
# DATABASE_URL = "postgresql://user:password@postgresserver/db"
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
notes = sqlalchemy.Table(
"notes",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("text", sqlalchemy.String),
sqlalchemy.Column("completed", sqlalchemy.Boolean),
)
engine = sqlalchemy.create_engine(
DATABASE_URL, connect_args={"check_same_thread": False}
)
metadata.create_all(engine)
class NoteIn(BaseModel):
text: str
completed: bool
class Note(BaseModel):
id: int
text: str
completed: bool
app = FastAPI()
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
@app.get("/notes/", response_model=List[Note])
async def read_notes():
query = notes.select()
return await database.fetch_all(query)
@app.post("/notes/", response_model=Note)
async def create_note(note: NoteIn):
query = notes.insert().values(text=note.text, completed=note.completed)
last_record_id = await database.execute(query)
return {**note.dict(), "id": last_record_id}
| {
"content_hash": "e7679bb496a5aedd57b11cea877483f1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 75,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.7102473498233216,
"repo_name": "tiangolo/fastapi",
"id": "cbf43d790f69b416a0530af00eeea00edfa7d227",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/async_sql_databases/tutorial001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
import os
import subprocess
#
# Checks all files in 'files' to be self-sufficient by creating a simple source file with contents:
# #include <file>
# int main() {}
# and hands it over to the compiler. In the case of an error, the compiler output is printed and an exception thrown.
#
def run_include_check(files, compiler_options, compiler_name = "g++"):
for filename in files:
#write simple checker file and compile:
print "Testing " + filename
sourcefilename = "test-self-sufficient.cpp"
file = open(sourcefilename, "w")
file.write('#include "' + filename + '"' + "\n")
file.write("int main() { return 0; }")
file.close()
try:
subprocess.check_output(compiler_name + " " + sourcefilename + " " + compiler_options, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print "ERROR: ",e.output
raise
#
# Iterates through
#
def run_check(root_path, exclude_dirs, exclude_files, compiler_options, compiler_name = "g++"):
files_to_check=[]
for root, dirs, files in os.walk(root_path):
for filename in files:
if not root in exclude_dirs:
if not filename in exclude_files:
files_to_check.append(os.path.join(root, filename))
run_include_check(files_to_check, compiler_options, compiler_name)
###
rootPath = '../viennagrid'
print " --- Running self sufficiency tests --- "
exclude_files=[
"seed_point_segmenting.hpp",
"serialization.hpp"
]
exclude_dirs=[]
run_check(rootPath, exclude_dirs, exclude_files, "-I..")
| {
"content_hash": "a42707adc0c1e952dc38dcd7376516c1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 130,
"avg_line_length": 29.07547169811321,
"alnum_prop": 0.6768332251784556,
"repo_name": "viennagrid/viennagrid-dev",
"id": "dea6eb4eeba73321820f5dfba234923b0f244438",
"size": "1872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/checkincludes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1771084"
},
{
"name": "CMake",
"bytes": "14141"
},
{
"name": "Python",
"bytes": "1872"
},
{
"name": "Shell",
"bytes": "3090"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('psoriassist', '0004_auto_20160313_1405'),
]
operations = [
migrations.AlterField(
model_name='lesion',
name='image',
field=models.CharField(blank=True, max_length=2000),
),
]
| {
"content_hash": "23a03c565dbd82e206d6162efd20a0a8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 22.944444444444443,
"alnum_prop": 0.5738498789346247,
"repo_name": "maxprais/psoriassist",
"id": "5a5473ef2df0de9a9ce5b06190b3d4664f626ee2",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psoriassist/migrations/0005_auto_20160313_1409.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "550936"
},
{
"name": "HTML",
"bytes": "208444"
},
{
"name": "JavaScript",
"bytes": "948239"
},
{
"name": "Python",
"bytes": "34713"
},
{
"name": "Shell",
"bytes": "470"
}
],
"symlink_target": ""
} |
import json
import urllib2
from ..models import Airport
from .. import BasicAPI
ENDPOINT_URL = 'https://api.flightstats.com/flex/airports/rest/v1/json/'
class Flightstats(BasicAPI):
is_multilingual = True
def __init__(self,*args,**kwargs):
self.app_id = kwargs['app_id']
self.app_key = kwargs['app_key']
def download(self,language=('en','English')):
url = '%sactive?appId=%s&appKey=%s&extendedOptions=+languageCode:%s' % (ENDPOINT_URL, self.app_id,
self.app_key, language[0])
print 'Fetching %s Airports from Flighstats.com' % language[1]
data = json.load(urllib2.urlopen(url))
return data
def import_data(self, data, languages=None):
print 'Starting import'
if languages is None:
loop_data = data
else:
loop_data = data[languages[0][0]]
for i,tmp in enumerate(loop_data['airports']):
try:
t = Airport.objects.get(iata=tmp.get('iata',None),icao= tmp.get('icao',None))
except Airport.DoesNotExist:
t = Airport(iata=tmp.get('iata',None),icao= tmp.get('icao',None))
if languages is not None:
for lang_code, lang in languages:
setattr(t, 'city_%s' % lang_code, data[lang_code]['airports'][i]['city'])
setattr(t, 'country_%s' % lang_code, data[lang_code]['airports'][i]['countryName'])
setattr(t, 'name_%s' % lang_code, data[lang_code]['airports'][i]['name'])
else:
setattr(t, 'city', tmp['city'])
setattr(t, 'country', tmp['countryName'])
setattr(t, 'name', tmp['name'])
setattr(t, 'country_code', tmp['countryCode'])
setattr(t, 'time_zone', tmp.get('timeZoneRegionName',None))
setattr(t, 'longitude', tmp.get('longitude',None))
setattr(t, 'latitude', tmp.get('latitude',None))
t.save()
| {
"content_hash": "3c488a8d7a2594ad0469e004a79a7f1c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 107,
"avg_line_length": 40.57692307692308,
"alnum_prop": 0.5303317535545023,
"repo_name": "illing2005/django-airports-apis",
"id": "f8dcb7e47b8605b02b75f09546e279ca250d8472",
"size": "2110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airports/flightstats/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16156"
}
],
"symlink_target": ""
} |
from itertools import product
from fractions import gcd
def compute():
side_length = 50
# Triangles with the right-angle at (0, 0)
num_triangles = side_length ** 2
# Triangles with the right-angle elsewhere
for x, y in product(range(0, side_length + 1), repeat=2):
if x == 0 and y == 0:
continue
# Find smallest dx and dy
l = gcd(x, y)
dx = x / l
dy = y / l
x2 = x - dy
y2 = y + dx
# Apply the inverse slope, going left and up
while x2 >= 0 and y2 <= side_length and not (x == 0 and y == 0):
num_triangles += 1
x2 -= dy
y2 += dx
x2 = x + dy
y2 = y - dx
# Apply the inverse slope, going right and down
while x2 <= side_length and y2 >= 0 and not (x2 == 0 and y2 == 0):
num_triangles += 1
x2 += dy
y2 -= dx
return num_triangles
if __name__ == "__main__":
print(compute()) | {
"content_hash": "19e69940015bf584dc95eaea15265a27",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 19,
"alnum_prop": 0.5897129186602871,
"repo_name": "Bathlamos/Project-Euler-Solutions",
"id": "4948a5fde6257afe9b4c5baaa28fa6e823e7d5c2",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/p091.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410931"
}
],
"symlink_target": ""
} |
"""Wrapper around the exif library to extract and convert some information.
"""
import datetime
import fractions
import warnings
import exif
# Some helper classes for exif attributes, having customized string
# representations.
class ExposureTime(fractions.Fraction):
def __str__(self):
if self.denominator == 1:
return "%s sec" % (self.numerator)
else:
return "%s/%s sec" % (self.numerator, self.denominator)
class Aperture(float):
def __str__(self):
# In fact, we display the reciprocal of the aperture, the
# f-number, in the string representation.
if self == int(self):
return "f/%d" % (int(self))
else:
return "f/%.1f" % (float(self))
class FocalLength(float):
def __str__(self):
if self == int(self):
return "%d mm" % (int(self))
else:
return "%.1f mm" % (float(self))
class Exif(object):
OrientationXlate = {
1: 'Horizontal (normal)',
2: 'Mirror horizontal',
3: 'Rotate 180',
4: 'Mirror vertical',
5: 'Mirror horizontal and rotate 270 CW',
6: 'Rotate 90 CW',
7: 'Mirror horizontal and rotate 90 CW',
8: 'Rotate 270 CW',
}
def __init__(self, path):
with path.open("rb") as f:
self._exif = exif.Image(f)
@property
def createDate(self):
"""Time and date the image was taken."""
try:
dt = self._exif.datetime_original
except (AttributeError, KeyError):
return None
else:
return datetime.datetime.strptime(dt, "%Y:%m:%d %H:%M:%S")
@property
def orientation(self):
"""Orientation of the camera relative to the scene."""
try:
orientation = self._exif.orientation
except (AttributeError, KeyError):
return None
else:
return self.OrientationXlate[int(orientation)]
@property
def gpsPosition(self):
"""GPS coordinates."""
try:
lat_tuple = self._exif.gps_latitude
lon_tuple = self._exif.gps_longitude
latref = self._exif.gps_latitude_ref
lonref = self._exif.gps_longitude_ref
except (AttributeError, KeyError):
return None
else:
lat = lat_tuple[0] + lat_tuple[1]/60 + lat_tuple[2]/3600
lon = lon_tuple[0] + lon_tuple[1]/60 + lon_tuple[2]/3600
return { latref:lat, lonref:lon }
@property
def cameraModel(self):
"""Camera Model."""
try:
return self._exif.model
except (AttributeError, KeyError):
return None
@property
def exposureTime(self):
"""Exposure time."""
try:
et = self._exif.exposure_time
except (AttributeError, KeyError):
return None
else:
return ExposureTime(fractions.Fraction(et).limit_denominator())
@property
def aperture(self):
"""Aperture."""
try:
f = self._exif.f_number
except (AttributeError, KeyError):
return None
else:
return Aperture(f)
@property
def iso(self):
"""ISO speed rating."""
try:
return self._exif.photographic_sensitivity
except (AttributeError, KeyError):
return None
@property
def focalLength(self):
"""Lens focal length."""
try:
fl = self._exif.focal_length
except (AttributeError, KeyError):
return None
else:
return FocalLength(fl)
| {
"content_hash": "b50f3ba2f431d0daa8507e978ee0e303",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 75,
"avg_line_length": 27.704545454545453,
"alnum_prop": 0.5490839485917418,
"repo_name": "RKrahl/photo-tools",
"id": "f1ab6a3b0cca797341b0ad1db813a2a80029b891",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "photoidx/exif.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Python",
"bytes": "125547"
}
],
"symlink_target": ""
} |
import logging
import sys
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
def test_collection_get():
with fiona.open('docs/data/test_uk.shp') as src:
result = src[5]
assert result['id'] == '5'
def test_collection_slice():
with fiona.open('docs/data/test_uk.shp') as src:
results = src[:5]
assert isinstance(results, list)
assert len(results) == 5
assert results[4]['id'] == '4'
def test_collection_iterator_slice():
with fiona.open('docs/data/test_uk.shp') as src:
results = list(src.items(5))
assert len(results) == 5
k, v = results[4]
assert k == 4
assert v['id'] == '4'
def test_collection_iterator_next():
with fiona.open('docs/data/test_uk.shp') as src:
k, v = next(src.items(5, None))
assert k == 5
assert v['id'] == '5'
def test_collection_iterator_items_slice():
with fiona.open('docs/data/test_uk.shp') as src:
l = len(src)
items = list(src.items(0, 5))
assert len(items) == 5
items = list(src.items(1, 5))
assert len(items) == 4
items = list(src.items(-5, None))
assert len(items) == 5
items = list(src.items(-5, -1))
assert len(items) == 4
items = list(src.items(0, None))
assert len(items) == l
items = list(src.items(5, None))
assert len(items) == (l - 5)
items = list(src.items(5, None, -1))
assert len(items) == 6
items = list(src.items(5, None, -2))
assert len(items) == 3
items = list(src.items(4, None, -2))
assert len(items) == 3
items = list(src.items(-1, -5, -1))
assert len(items) == 4
items = list(src.items(-5, None, -1))
assert len(items) == (l - 5 + 1)
def test_collection_iterator_keys_next():
with fiona.open('docs/data/test_uk.shp') as src:
k = next(src.keys(5, None))
assert k == 5
| {
"content_hash": "c60cbba968df0ef0b61833d778a2dcfa",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 59,
"avg_line_length": 26.573333333333334,
"alnum_prop": 0.5499247365780231,
"repo_name": "johanvdw/Fiona",
"id": "6e4d62a01f38c95549b884b794bace24e7c33790",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_slice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "258319"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
from moto.core import ACCOUNT_ID
def try_parse_int(value, default=None):
try:
return int(value)
except (TypeError, ValueError):
return default
def parse_sg_attributes_from_dict(sg_attributes):
ip_protocol = sg_attributes.get("IpProtocol", [None])[0]
from_port = sg_attributes.get("FromPort", [None])[0]
to_port = sg_attributes.get("ToPort", [None])[0]
ip_ranges = []
ip_ranges_tree = sg_attributes.get("IpRanges") or {}
for ip_range_idx in sorted(ip_ranges_tree.keys()):
ip_range = {"CidrIp": ip_ranges_tree[ip_range_idx]["CidrIp"][0]}
if ip_ranges_tree[ip_range_idx].get("Description"):
ip_range["Description"] = ip_ranges_tree[ip_range_idx].get("Description")[0]
ip_ranges.append(ip_range)
source_groups = []
source_group_ids = []
groups_tree = sg_attributes.get("Groups") or {}
for group_idx in sorted(groups_tree.keys()):
group_dict = groups_tree[group_idx]
if "GroupId" in group_dict:
source_group_ids.append(group_dict["GroupId"][0])
elif "GroupName" in group_dict:
source_groups.append(group_dict["GroupName"][0])
return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids
class SecurityGroups(BaseResponse):
def _process_rules_from_querystring(self):
group_name_or_id = self._get_param("GroupName") or self._get_param("GroupId")
querytree = {}
for key, value in self.querystring.items():
key_splitted = key.split(".")
key_splitted = [try_parse_int(e, e) for e in key_splitted]
d = querytree
for subkey in key_splitted[:-1]:
if subkey not in d:
d[subkey] = {}
d = d[subkey]
d[key_splitted[-1]] = value
if "IpPermissions" not in querytree:
# Handle single rule syntax
(
ip_protocol,
from_port,
to_port,
ip_ranges,
source_groups,
source_group_ids,
) = parse_sg_attributes_from_dict(querytree)
yield (
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_groups,
source_group_ids,
)
ip_permissions = querytree.get("IpPermissions") or {}
for ip_permission_idx in sorted(ip_permissions.keys()):
ip_permission = ip_permissions[ip_permission_idx]
(
ip_protocol,
from_port,
to_port,
ip_ranges,
source_groups,
source_group_ids,
) = parse_sg_attributes_from_dict(ip_permission)
yield (
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_groups,
source_group_ids,
)
def authorize_security_group_egress(self):
if self.is_not_dryrun("GrantSecurityGroupEgress"):
for args in self._process_rules_from_querystring():
self.ec2_backend.authorize_security_group_egress(*args)
return AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE
def authorize_security_group_ingress(self):
if self.is_not_dryrun("GrantSecurityGroupIngress"):
for args in self._process_rules_from_querystring():
self.ec2_backend.authorize_security_group_ingress(*args)
return AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE
def create_security_group(self):
name = self._get_param("GroupName")
description = self._get_param("GroupDescription")
vpc_id = self._get_param("VpcId")
if self.is_not_dryrun("CreateSecurityGroup"):
group = self.ec2_backend.create_security_group(
name, description, vpc_id=vpc_id
)
template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)
return template.render(group=group)
def delete_security_group(self):
# TODO this should raise an error if there are instances in the group.
# See
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html
name = self._get_param("GroupName")
sg_id = self._get_param("GroupId")
if self.is_not_dryrun("DeleteSecurityGroup"):
if name:
self.ec2_backend.delete_security_group(name)
elif sg_id:
self.ec2_backend.delete_security_group(group_id=sg_id)
return DELETE_GROUP_RESPONSE
def describe_security_groups(self):
groupnames = self._get_multi_param("GroupName")
group_ids = self._get_multi_param("GroupId")
filters = filters_from_querystring(self.querystring)
groups = self.ec2_backend.describe_security_groups(
group_ids=group_ids, groupnames=groupnames, filters=filters
)
template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)
return template.render(groups=groups)
def revoke_security_group_egress(self):
if self.is_not_dryrun("RevokeSecurityGroupEgress"):
for args in self._process_rules_from_querystring():
success = self.ec2_backend.revoke_security_group_egress(*args)
if not success:
return "Could not find a matching egress rule", dict(status=404)
return REVOKE_SECURITY_GROUP_EGRESS_RESPONSE
def revoke_security_group_ingress(self):
if self.is_not_dryrun("RevokeSecurityGroupIngress"):
for args in self._process_rules_from_querystring():
self.ec2_backend.revoke_security_group_ingress(*args)
return REVOKE_SECURITY_GROUP_INGRESS_RESPONSE
CREATE_SECURITY_GROUP_RESPONSE = """<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
<groupId>{{ group.id }}</groupId>
</CreateSecurityGroupResponse>"""
DELETE_GROUP_RESPONSE = """<DeleteSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSecurityGroupResponse>"""
DESCRIBE_SECURITY_GROUPS_RESPONSE = (
"""<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<securityGroupInfo>
{% for group in groups %}
<item>
<ownerId>"""
+ ACCOUNT_ID
+ """</ownerId>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
<groupDescription>{{ group.description }}</groupDescription>
{% if group.vpc_id %}
<vpcId>{{ group.vpc_id }}</vpcId>
{% endif %}
<ipPermissions>
{% for rule in group.ingress_rules %}
<item>
<ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
{% if rule.from_port is not none %}
<fromPort>{{ rule.from_port }}</fromPort>
{% endif %}
{% if rule.to_port is not none %}
<toPort>{{ rule.to_port }}</toPort>
{% endif %}
<groups>
{% for source_group in rule.source_groups %}
<item>
<userId>"""
+ ACCOUNT_ID
+ """</userId>
<groupId>{{ source_group.id }}</groupId>
<groupName>{{ source_group.name }}</groupName>
</item>
{% endfor %}
</groups>
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range['CidrIp'] }}</cidrIp>
{% if ip_range['Description'] %}
<description>{{ ip_range['Description'] }}</description>
{% endif %}
</item>
{% endfor %}
</ipRanges>
</item>
{% endfor %}
</ipPermissions>
<ipPermissionsEgress>
{% for rule in group.egress_rules %}
<item>
<ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
{% if rule.from_port is not none %}
<fromPort>{{ rule.from_port }}</fromPort>
{% endif %}
{% if rule.to_port is not none %}
<toPort>{{ rule.to_port }}</toPort>
{% endif %}
<groups>
{% for source_group in rule.source_groups %}
<item>
<userId>"""
+ ACCOUNT_ID
+ """</userId>
<groupId>{{ source_group.id }}</groupId>
<groupName>{{ source_group.name }}</groupName>
</item>
{% endfor %}
</groups>
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range['CidrIp'] }}</cidrIp>
{% if ip_range['Description'] %}
<description>{{ ip_range['Description'] }}</description>
{% endif %}
</item>
{% endfor %}
</ipRanges>
</item>
{% endfor %}
</ipPermissionsEgress>
<tagSet>
{% for tag in group.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</securityGroupInfo>
</DescribeSecurityGroupsResponse>"""
)
AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupIngressResponse>"""
REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupIngressResponse>"""
AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE = """
<AuthorizeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupEgressResponse>"""
REVOKE_SECURITY_GROUP_EGRESS_RESPONSE = """<RevokeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupEgressResponse>"""
| {
"content_hash": "aa8c9480782d0d670bdb695b39e6d8d3",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 135,
"avg_line_length": 40.2248322147651,
"alnum_prop": 0.5276549595395011,
"repo_name": "william-richard/moto",
"id": "60645e1659cd342c10fa0b41124fd019dee74e63",
"size": "11987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/ec2/responses/security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
try:
import cPickle as pickle
except:
import pickle
from hashlib import md5
import time
import functools
import base64
from sqlalchemy.sql import text as _sql
from twisted.internet import reactor
from twisted.logger import Logger
CACHE_SET_EVENT = 'cache_set'
CACHE_DELETE_EVENT = 'cache_delete'
CACHE_UPDATE_EVENT = 'cache_update'
class CacheManager(object):
log = Logger()
def __init__(self, dbengine,cache_table='system_cache',cache_name="cache"):
self.cache_name = cache_name
self.dbengine = dbengine
self.cache_table = cache_table
self.get_total = 0
self.set_total = 0
self.hit_total = 0
self.update_total = 0
self.delete_total = 0
self.check_expire(first_delay=10)
self.print_hit_stat(first_delay=10)
def print_hit_stat(self, first_delay=0):
if first_delay > 0:
reactor.callLater(first_delay, self.check_expire)
logstr = """
----------------------- cache stat ----------------------
# cache name : {0}
# visit cache total : {1}
# add cache total : {2}
# hit cache total : {3}
# update cache total : {4}
# delete cache total : {5}
# current db cache total : {6}
---------------------------------------------------------
""".format(self.cache_name, self.get_total,self.set_total,self.hit_total,
self.update_total,self.delete_total,self.count())
self.log.info(logstr)
reactor.callLater(60.0, self.print_hit_stat)
def encode_data(self,data):
return base64.b64encode(pickle.dumps(data, pickle.HIGHEST_PROTOCOL))
def decode_data(self,raw_data):
return pickle.loads(base64.b64decode(raw_data))
def cache(self,prefix="cache",key_name=None, expire=3600):
def func_warp1(func):
@functools.wraps(func)
def func_wrap2(*args, **kargs):
if key_name and kargs.get(key_name):
key = "%s:%s" % (prefix, kargs.get(key_name))
else:
sig = md5(repr(args) + repr(kargs)).hexdigest()
key = "%s:%s:%s"%(prefix,func.__name__, sig)
data = self.get(key)
if data is not None:
return data
data = func(*args, **kargs)
if data is not None:
self.set(key, data, expire)
return data
return func_wrap2
return func_warp1
def aget(self, key, fetchfunc, *args, **kwargs):
result = self.get(key)
if result:
return result
if fetchfunc:
expire = kwargs.pop('expire',3600)
result = fetchfunc(*args,**kwargs)
if result:
self.set(key,result,expire=expire)
return result
def check_expire(self, first_delay=0):
if first_delay > 0:
reactor.callLater(first_delay, self.check_expire)
with self.dbengine.begin() as conn:
try:
conn.execute(_sql("delete from %s where _time > 0 and _time < :time" % self.cache_table),time=int(time.time()))
except:
pass
reactor.callLater(120.0, self.check_expire)
def get(self, key):
self.get_total += 1
raw_data = None
_del_func = self.delete
with self.dbengine.begin() as conn:
try:
cur = conn.execute(_sql("select _value, _time from %s where _key = :key " % self.cache_table),key=key)
_cache = cur.fetchone()
if _cache:
self.hit_total += 1
_time = int(_cache['_time'])
if _time > 0 and time.time() > _time:
reactor.callLater(0.01, _del_func, key,)
else:
raw_data = _cache['_value']
except:
import traceback
traceback.print_exc()
try:
if raw_data:
return self.decode_data(raw_data)
except:
self.delete(key)
return None
def event_cache_delete(self, key):
self.log.info("event: delete cache %s " % key)
self.delete(key)
def count(self):
with self.dbengine.begin() as conn:
try:
cur = conn.execute(_sql("select count(_key) as count from %s " % self.cache_table))
return int(cur.fetchone()['count'])
except:
self.log.error("cache count error")
return 0
def delete(self,key):
self.delete_total += 1
with self.dbengine.begin() as conn:
try:
conn.execute(_sql("delete from %s where _key = :key " % self.cache_table),key=key)
except:
import traceback
traceback.print_exc()
def event_cache_set(self, key, value, expire=0):
self.log.info("event: set cache %s " % key)
self.set(key, value, expire)
def set(self, key, value, expire=0):
self.set_total += 1
raw_data = self.encode_data(value)
with self.dbengine.begin() as conn:
_time = expire>0 and (int(time.time()) + int(expire)) or 0
try:
conn.execute(_sql("insert into %s values (:key, :value, :time) " % self.cache_table),
key=key,value=raw_data,time=_time)
except:
conn.execute(_sql("delete from %s where _key = :key " % self.cache_table),key=key)
conn.execute(_sql("insert into %s values (:key, :value, :time) " % self.cache_table),
key=key,value=raw_data,time=_time)
def event_cache_update(self, key, value, expire=0):
self.log.info("event: update cache %s " % key)
self.update(key, value, expire)
def update(self, key, value, expire=0):
self.update_total += 1
raw_data = self.encode_data(value)
with self.dbengine.begin() as conn:
_time = expire>0 and (int(time.time()) + int(expire)) or 0
try:
conn.execute(_sql("""update %s
set _value=:value, _time=:time
where _key=:key""" % self.cache_table),
key=key,value=raw_data,time=_time)
except:
import traceback
traceback.print_exc()
| {
"content_hash": "e5e8187fec6737320d7db9b91c702e2b",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 127,
"avg_line_length": 35.178378378378376,
"alnum_prop": 0.5175169022741242,
"repo_name": "talkincode/toughlib",
"id": "ebf89daf4be78c02f2e6ffe2b99668d5ce67410b",
"size": "6508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toughlib/db_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "114890"
}
],
"symlink_target": ""
} |
"""Test the preciousblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_chain,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
while True:
try:
assert(len(node_dest.getblock(blockhash, False)) > 0)
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 1)
(hashY, hashZ) = self.nodes[1].generate(2)
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
(hashA, hashB, hashC) = self.nodes[0].generate(3)
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
(hashE, hashF, hashG) = self.nodes[1].generate(3)
assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generate(4)
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
| {
"content_hash": "a08bfaa8792187f1c14a48df1662fe02",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 127,
"avg_line_length": 44.42727272727273,
"alnum_prop": 0.6451810926949049,
"repo_name": "nmarley/dash",
"id": "99db817d2b98cef811ed42dbc2eae59092bed81e",
"size": "5096",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/preciousblock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28452"
},
{
"name": "C",
"bytes": "1369530"
},
{
"name": "C++",
"bytes": "7670346"
},
{
"name": "CMake",
"bytes": "2551"
},
{
"name": "CSS",
"bytes": "76026"
},
{
"name": "Dockerfile",
"bytes": "237"
},
{
"name": "GDB",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198556"
},
{
"name": "Makefile",
"bytes": "122320"
},
{
"name": "Objective-C",
"bytes": "28354"
},
{
"name": "Objective-C++",
"bytes": "9115"
},
{
"name": "Python",
"bytes": "1263218"
},
{
"name": "QMake",
"bytes": "825"
},
{
"name": "Ruby",
"bytes": "2175"
},
{
"name": "Shell",
"bytes": "58304"
}
],
"symlink_target": ""
} |
import unittest
import mechanize
from links import links
class LinksTest(unittest.TestCase):
"""Test para 'links.py'"""
def test_obtener_parametros_de_la_url(self):
url_unlam = 'http://www.unlam.edu.ar/index.php'
url_unlam_con_parametros = 'http://www.unlam.edu.ar/index.php?seccion=-1&accion=buscador'
url_google_con_parametros = 'https://www.google.com.ar/?gfe_rd=cr&dcr=0&ei=eUXWWZPVGcb_8AfYso_wAw&gws_rd=ssl'
self.assertEqual(links.obtener_parametros_de_la_url(url_unlam_con_parametros),
{'seccion':['-1'], 'accion':['buscador']}
)
self.assertEqual(links.obtener_parametros_de_la_url(url_unlam),
{}
)
self.assertEqual(links.obtener_parametros_de_la_url(url_google_con_parametros),
{'gfe_rd':['cr'], 'dcr':['0'], 'ei':['eUXWWZPVGcb_8AfYso_wAw'], 'gws_rd':['ssl']}
)
def test_obtener_scripts_desde_url(self):
url_blogger = 'https://www.blogger.com/about/?r=2'
dominio_blogger = 'https'
archivo_html_blogger = open('test/blogger_html.txt', 'r')
html_blogger = archivo_html_blogger.read()
archivo_scripts_blogger_1 = open('test/blogger_script_1.txt', 'r')
scripts_blogger_1 = archivo_scripts_blogger_1.read()
archivo_scripts_blogger_2 = open('test/blogger_script_2.txt', 'r')
scripts_blogger_2 = archivo_scripts_blogger_2.read()
lista_scripts_blogger = [str(scripts_blogger_1), str(scripts_blogger_2)]
links._compilar_regex(r'(?!^//|\bhttp\b)[A-Za-z0-9_\-//]*\.\w*',
r'(?!^//|\bhttp\b)([A-Za-z0-9_\-\/]*\/[A-Za-z0-9_\-\.\/]*)',
r'.*\b' + 'www.blogger.com'.replace('www.', r'\.?') + r'\b(?!\.)'
)
self.assertNotEqual(links.obtener_scripts_desde_url(url_blogger, dominio_blogger, html_blogger),
lista_scripts_blogger
)
def test_obtener_link_valido(self):
links._compilar_regex(r'(?!^//)[A-Za-z0-9_\-//]*\.\w*',
'([A-Za-z0-9_\-\/]*\/[A-Za-z0-9_\-\.\/]*)',
r'.*\b' + 'www.blogger.com'.replace('www.', '\.?') + r'\b(?!\.)'
)
url_blogger = 'https://www.blogger.com/about/?r=2'
dominio_blogger = 'https'
link = '/go/createyourblog'
self.assertEqual(links.obtener_link_valido(url_blogger, link, dominio_blogger),
'https://www.blogger.com/go/createyourblog'
)
self.assertEqual(links.obtener_link_valido(url_blogger, '/', dominio_blogger),
'https://www.blogger.com/'
)
def test_obtener_entradas_desde_url(self):
url_unlam = 'http://alumno2.unlam.edu.ar/index.jsp?pageLand=registrarse'
html_unlam = open('test/unlam_html.txt', 'r').read()
parametros = links.obtener_entradas_desde_url(html_unlam)
parametro = parametros[0][0]['id']
self.assertEqual(parametro,
'docume'
)
def test_es_url_prohibida(self):
self.assertTrue(links.es_url_prohibida('http://example.com/asd/imagen.jpg'))
self.assertFalse(links.es_url_prohibida('http://example.com/asd/noespng.html'))
def test_es_url_valida(self):
self.assertFalse(links.es_url_valida('python.org'))
self.assertTrue(links.es_url_valida('https://www.python.org'))
def test_se_puede_acceder_a_url(self):
self.assertFalse(links.se_puede_acceder_a_url('https://sitioquenoesasfasdasda.org'))
self.assertTrue(links.se_puede_acceder_a_url('https://www.python.org'))
def test_abrir_url_en_navegador(self):
br = mechanize.Browser()
links.configurar_navegador(br)
lista_cookies = links.obtener_cookies_validas('DXGlobalization_lang=en;DXGlobalization_locale=en-US;DXGlobalization_currency=ARS')
self.assertFalse(links.abrir_url_en_navegador(br, 'https://sitioquenoesasfasdasda.org'))
self.assertTrue(links.abrir_url_en_navegador(br, 'https://www.python.org'))
self.assertTrue(links.abrir_url_en_navegador(br, 'https://cart.dx.com/'))
self.assertTrue(links.abrir_url_en_navegador(br, 'https://cart.dx.com/', lista_cookies))
def test_validar_formato_cookies(self):
lista_cookies = links.obtener_cookies_validas('DXGlobalization_lang=en;DXGlobalization_locale=en-US;DXGlobalization_currency=ARS')
#self.assertEqual(dict_cokies,
# {'DXGlobalization_lang':'en', 'DXGlobalization_locale':'en-US','DXGlobalization_currency':'ARS' }
# )
self.assertEqual(lista_cookies,
['DXGlobalization_lang=en', 'DXGlobalization_locale=en-US','DXGlobalization_currency=ARS' ]
)
self.assertFalse(links.obtener_cookies_validas('DXGlobalization_lang=en;'))
self.assertFalse(links.obtener_cookies_validas('DXGlobalization_lang='))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5ed75f225487428e5091f262fa1b9f52",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 138,
"avg_line_length": 46.64035087719298,
"alnum_prop": 0.567801391762272,
"repo_name": "leapalazzolo/XSS",
"id": "62ff02a6bac3206fc4fd091a30ff001276808ac7",
"size": "5360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45203"
}
],
"symlink_target": ""
} |
import re
import operator
from collections import Counter
from zipfile import ZipFile
from numpy import array
from scipy import zeros
from scipy.stats import chisquare
kWORDS = re.compile("[a-z]{1,}")
kSTOPWORDS = set(['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'yo',
'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for',
'with', 'about', 'against', 'between', 'into', 'through', 'during',
'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in',
'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very',
's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 've', 'm'])
def bigrams(sentence):
"""
Given a sentence, generate all bigrams in the sentence.
"""
for ii, ww in enumerate(sentence[:-1]):
yield ww, sentence[ii + 1]
def tokenize(sentence):
"""
Given a sentence, return a list of all the words in the sentence.
"""
return kWORDS.findall(sentence.lower())
def sentences_from_zipfile(zip_file):
"""
Given a zip file, yield an iterator over the lines in each file in the
zip file.
"""
with ZipFile(zip_file) as z:
for ii in z.namelist():
try:
pres = ii.replace(".txt", "").replace("state_union/", "").split("-")[1]
except IndexError:
continue
for jj in z.read(ii).decode(errors='replace').split("\n")[3:]:
yield jj.lower()
def chisquare_pvalue(obs, ex):
"""
Given a 2x2 contingency table both observed and expected, returns the
corresponding chisquared p-value.
@param obs An array (list of lists or numpy array) of observed values
@param obs An array (list of lists or numpy array) of expected values
"""
return 1.0
class BigramFinder:
"""
Finds bigrams in a stream of text.
"""
def __init__(self, min_unigram = 10, max_unigram = 150, min_ngram = 5,
exclude=[]):
"""
Instantiates the class.
@param min_ngram Ignore bigrams that appear fewer than this many times
@param max_unigram Ignore words that appear more than this many times
@param min_unigram Ignore words that appear fewer than this many times
@param exclude Don't add words from this set to bigrams
"""
self._exclude = set(exclude)
self._max_unigram = max_unigram
self._min_unigram = min_unigram
self._min_ngram = min_ngram
self._vocab = None
# You may want to add additional data structures here.
self._unigram = Counter()
def observed_and_expected(self, bigram):
"""
Compute the observed and expected counts for a bigram
@bigram A tuple containing the words to score
"""
obs = zeros((2, 2))
ex = zeros((2, 2))
return obs, ex
def score(self, bigram):
"""
Compute the chi-square probability of a bigram being dependent.
If either word of a bigram is in the "exclude" list, return 1.0.
@bigram A tuple containing the words to score
"""
# you shouldn't need to edit this function
if any(x in self._exclude for x in bigram):
return 1.0
obs, ex = self.observed_and_expected(bigram)
return chisquare_pvalue(obs, ex)
def vocab_scan(self, sentence):
"""
Given a sentence, scan all of its words and add up their counts.
This will be used to finalize the vocabulary later.
"""
# Don't modify this function.
for ii in sentence:
self._unigram[ii] += 1
def vocab(self):
"""
Return the finder's vocab
"""
# Don't modify this function.
return self._vocab
def finalize(self):
"""
Creates the vocabulary of for later processing. Filters low frequency
and high frequency words.
"""
# Don't modify this function.
self._vocab = set(x for x in self._unigram if self._unigram
if self._unigram[x] >= self._min_unigram and
self._unigram[x] <= self._max_unigram and
x not in self._exclude)
def add_sentence(self, sentence):
"""
Add the counts for a sentence (assumed to be iterable) so that we can
then score bigrams.
"""
assert self._vocab is not None, "Adding counts before finalizing vocabulary"
# Your code here
for ll, rr in bigrams(sentence):
None
# Your code here
def valid_bigrams(self):
"""
Return an iterator over the bigrams that have been seen enough to get a
score.
"""
# Your code here
return []
def sorted_bigrams(self):
"""
Return n-grams sorted by the probability of being an n-gram. Should
yield a tuple of words in bigram and the p-value of the bigram.
"""
# You should not need to modify this function
d = {}
for ngram in self.valid_bigrams():
d[ngram] = self.score(ngram)
for ngram, score in sorted(d.items(), key=operator.itemgetter(1), reverse=True):
yield ngram, score
if __name__ == "__main__":
bf = BigramFinder(exclude=kSTOPWORDS)
for sent in sentences_from_zipfile("../data/state_union.zip"):
bf.vocab_scan(tokenize(sent))
bf.finalize()
for sent in sentences_from_zipfile("../data/state_union.zip"):
bf.add_sentence(tokenize(sent))
for ngram, score in list(bf.sorted_bigrams())[:100]:
print("%f\t%s\t%s\t" % (score, ngram[0], ngram[1]))
| {
"content_hash": "0f49ac234eda8dff87100798fb6ef2e8",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 88,
"avg_line_length": 32.11538461538461,
"alnum_prop": 0.5392215568862275,
"repo_name": "Pinafore/ds-hw",
"id": "dfb918fb45914c6b071e729c24ba5f7e1751b0d1",
"size": "6680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stat_test/bigrams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11597"
},
{
"name": "Python",
"bytes": "38963"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'PendingTimelineShare.user'
db.alter_column(u'timeline_pendingtimelineshare', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas_auth.User']))
def backwards(self, orm):
# Changing field 'PendingTimelineShare.user'
db.alter_column(u'timeline_pendingtimelineshare', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'attribution_copy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'attribution_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'ugq': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': u"orm['canvas.Content']"}),
'stroke_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "u'auth_user'", '_ormbases': [u'auth.User'], 'proxy': 'True'}
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'timeline.pendingtimelineshare': {
'Meta': {'object_name': 'PendingTimelineShare'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas_auth.User']"})
}
}
complete_apps = ['timeline'] | {
"content_hash": "11125ffd52acd98a21d4db0ccfedd84b",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 196,
"avg_line_length": 81.82786885245902,
"alnum_prop": 0.5616548131824101,
"repo_name": "drawquest/drawquest-web",
"id": "5f81b364bcce575417239fcbb806c01299810bad",
"size": "10007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/drawquest/apps/timeline/migrations/0003_auto__chg_field_pendingtimelineshare_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
} |
import os.path as op
import warnings
import copy
import numpy as np
from .fixes import _get_img_fdata
from .parallel import parallel_func
from .source_estimate import (
_BaseSurfaceSourceEstimate, _BaseVolSourceEstimate, _BaseSourceEstimate,
_get_ico_tris)
from .source_space import SourceSpaces, _ensure_src, _grid_interp
from .surface import read_morph_map, mesh_edges, read_surface, _compute_nearest
from .transforms import _angle_between_quats, rot_to_quat
from .utils import (logger, verbose, check_version, get_subjects_dir,
warn as warn_, fill_doc, _check_option, _validate_type,
BunchConst, wrapped_stdout, _check_fname, warn,
_ensure_int, ProgressBar, use_log_level)
from .externals.h5io import read_hdf5, write_hdf5
@verbose
def compute_source_morph(src, subject_from=None, subject_to='fsaverage',
subjects_dir=None, zooms='auto',
niter_affine=(100, 100, 10), niter_sdr=(5, 5, 3),
spacing=5, smooth=None, warn=True, xhemi=False,
sparse=False, src_to=None, precompute=False,
verbose=False):
"""Create a SourceMorph from one subject to another.
Method is based on spherical morphing by FreeSurfer for surface
cortical estimates :footcite:`GreveEtAl2013` and
Symmetric Diffeomorphic Registration for volumic data
:footcite:`AvantsEtAl2008`.
Parameters
----------
src : instance of SourceSpaces | instance of SourceEstimate
The SourceSpaces of subject_from (can be a
SourceEstimate if only using a surface source space).
subject_from : str | None
Name of the original subject as named in the SUBJECTS_DIR.
If None (default), then ``src[0]['subject_his_id]'`` will be used.
subject_to : str | None
Name of the subject to which to morph as named in the SUBJECTS_DIR.
Default is ``'fsaverage'``. If None, ``src_to[0]['subject_his_id']``
will be used.
.. versionchanged:: 0.20
Support for subject_to=None.
%(subjects_dir)s
zooms : float | tuple | str | None
The voxel size of volume for each spatial dimension in mm.
If spacing is None, MRIs won't be resliced, and both volumes
must have the same number of spatial dimensions.
Can also be ``'auto'`` to use ``5.`` if ``src_to is None`` and
the zooms from ``src_to`` otherwise.
.. versionchanged:: 0.20
Support for 'auto' mode.
niter_affine : tuple of int
Number of levels (``len(niter_affine)``) and number of
iterations per level - for each successive stage of iterative
refinement - to perform the affine transform.
Default is niter_affine=(100, 100, 10).
niter_sdr : tuple of int
Number of levels (``len(niter_sdr)``) and number of
iterations per level - for each successive stage of iterative
refinement - to perform the Symmetric Diffeomorphic Registration (sdr)
transform. Default is niter_sdr=(5, 5, 3).
spacing : int | list | None
The resolution of the icosahedral mesh (typically 5).
If None, all vertices will be used (potentially filling the
surface). If a list, then values will be morphed to the set of
vertices specified in in ``spacing[0]`` and ``spacing[1]``.
This will be ignored if ``src_to`` is supplied.
.. versionchanged:: 0.21
src_to, if provided, takes precedence.
smooth : int | str | None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values. Can also be ``'nearest'`` to use the nearest
vertices on the surface (requires SciPy >= 1.3).
.. versionchanged:: 0.20
Added support for 'nearest'.
warn : bool
If True, warn if not all vertices were used. The default is warn=True.
xhemi : bool
Morph across hemisphere. Currently only implemented for
``subject_to == subject_from``. See notes below.
The default is xhemi=False.
sparse : bool
Morph as a sparse source estimate. Works only with (Vector)
SourceEstimate. If True the only parameters used are subject_to and
subject_from, and spacing has to be None. Default is sparse=False.
src_to : instance of SourceSpaces | None
The destination source space.
- For surface-based morphing, this is the preferred over ``spacing``
for providing the vertices.
- For volumetric morphing, this should be passed so that 1) the
resultingmorph volume is properly constrained to the brain volume,
and 2) STCs from multiple subjects morphed to the same destination
subject/source space have the vertices.
- For mixed (surface + volume) morphing, this is required.
.. versionadded:: 0.20
precompute : bool
If True (default False), compute the sparse matrix representation of
the volumetric morph (if present). This takes a long time to
compute, but can make morphs faster when thousands of points are used.
See :meth:`mne.SourceMorph.compute_vol_morph_mat` (which can be called
later if desired) for more information.
.. versionadded:: 0.22
%(verbose)s
Returns
-------
morph : instance of SourceMorph
The :class:`mne.SourceMorph` object.
Notes
-----
This function can be used to morph surface data between hemispheres by
setting ``xhemi=True``. The full cross-hemisphere morph matrix maps left
to right and right to left. A matrix for cross-mapping only one hemisphere
can be constructed by specifying the appropriate vertices, for example, to
map the right hemisphere to the left::
vertices_from=[[], vert_rh], vertices_to=[vert_lh, []]
Cross-hemisphere mapping requires appropriate ``sphere.left_right``
morph-maps in the subject's directory. These morph maps are included
with the ``fsaverage_sym`` FreeSurfer subject, and can be created for other
subjects with the ``mris_left_right_register`` FreeSurfer command. The
``fsaverage_sym`` subject is included with FreeSurfer > 5.1 and can be
obtained as described `here
<https://surfer.nmr.mgh.harvard.edu/fswiki/Xhemi>`_. For statistical
comparisons between hemispheres, use of the symmetric ``fsaverage_sym``
model is recommended to minimize bias :footcite:`GreveEtAl2013`.
.. versionadded:: 0.17.0
.. versionadded:: 0.21.0
Support for morphing mixed source estimates.
References
----------
.. footbibliography::
"""
src_data, kind, src_subject = _get_src_data(src)
subject_from = _check_subject_src(subject_from, src_subject)
del src
_validate_type(src_to, (SourceSpaces, None), 'src_to')
_validate_type(subject_to, (str, None), 'subject_to')
if src_to is None and subject_to is None:
raise ValueError('subject_to cannot be None when src_to is None')
subject_to = _check_subject_src(subject_to, src_to, 'subject_to')
# Params
warn = False if sparse else warn
if kind not in 'surface' and xhemi:
raise ValueError('Inter-hemispheric morphing can only be used '
'with surface source estimates.')
if sparse and kind != 'surface':
raise ValueError('Only surface source estimates can compute a '
'sparse morph.')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
shape = affine = pre_affine = sdr_morph = morph_mat = None
vertices_to_surf, vertices_to_vol = list(), list()
if kind in ('volume', 'mixed'):
_check_dep(nibabel='2.1.0', dipy='0.10.1')
import nibabel as nib
logger.info('Volume source space(s) present...')
# load moving MRI
mri_subpath = op.join('mri', 'brain.mgz')
mri_path_from = op.join(subjects_dir, subject_from, mri_subpath)
logger.info(' Loading %s as "from" volume' % mri_path_from)
with warnings.catch_warnings():
mri_from = nib.load(mri_path_from)
# eventually we could let this be some other volume, but for now
# let's KISS and use `brain.mgz`, too
mri_path_to = op.join(subjects_dir, subject_to, mri_subpath)
if not op.isfile(mri_path_to):
raise IOError('cannot read file: %s' % mri_path_to)
logger.info(' Loading %s as "to" volume' % mri_path_to)
with warnings.catch_warnings():
mri_to = nib.load(mri_path_to)
# deal with `src_to` subsampling
zooms_src_to = None
if src_to is None:
if kind == 'mixed':
raise ValueError('src_to must be provided when using a '
'mixed source space')
else:
surf_offset = 2 if src_to.kind == 'mixed' else 0
# All of our computations are in RAS (like img.affine), so we need
# to get the transformation from RAS to the source space
# subsampling of vox (src), not MRI (FreeSurfer surface RAS) to src
src_ras_t = np.dot(src_to[-1]['mri_ras_t']['trans'],
src_to[-1]['src_mri_t']['trans'])
src_ras_t[:3] *= 1e3
src_data['to_vox_map'] = (src_to[-1]['shape'], src_ras_t)
vertices_to_vol = [s['vertno'] for s in src_to[surf_offset:]]
zooms_src_to = np.diag(src_to[-1]['src_mri_t']['trans'])[:3] * 1000
zooms_src_to = tuple(zooms_src_to)
# pre-compute non-linear morph
zooms = _check_zooms(mri_from, zooms, zooms_src_to)
shape, zooms, affine, pre_affine, sdr_morph = _compute_morph_sdr(
mri_from, mri_to, niter_affine, niter_sdr, zooms)
if kind in ('surface', 'mixed'):
logger.info('surface source space present ...')
vertices_from = src_data['vertices_from']
if sparse:
if spacing is not None:
raise ValueError('spacing must be set to None if '
'sparse=True.')
if xhemi:
raise ValueError('xhemi=True can only be used with '
'sparse=False')
vertices_to_surf, morph_mat = _compute_sparse_morph(
vertices_from, subject_from, subject_to, subjects_dir)
else:
if src_to is not None:
assert src_to.kind in ('surface', 'mixed')
vertices_to_surf = [s['vertno'].copy() for s in src_to[:2]]
else:
vertices_to_surf = grade_to_vertices(
subject_to, spacing, subjects_dir, 1)
morph_mat = _compute_morph_matrix(
subject_from=subject_from, subject_to=subject_to,
vertices_from=vertices_from, vertices_to=vertices_to_surf,
subjects_dir=subjects_dir, smooth=smooth, warn=warn,
xhemi=xhemi)
n_verts = sum(len(v) for v in vertices_to_surf)
assert morph_mat.shape[0] == n_verts
vertices_to = vertices_to_surf + vertices_to_vol
if src_to is not None:
assert len(vertices_to) == len(src_to)
morph = SourceMorph(subject_from, subject_to, kind, zooms,
niter_affine, niter_sdr, spacing, smooth, xhemi,
morph_mat, vertices_to, shape, affine,
pre_affine, sdr_morph, src_data, None)
if precompute:
morph.compute_vol_morph_mat()
logger.info('[done]')
return morph
def _compute_sparse_morph(vertices_from, subject_from, subject_to,
subjects_dir=None):
"""Get nearest vertices from one subject to another."""
from scipy import sparse
maps = read_morph_map(subject_to, subject_from, subjects_dir)
cnt = 0
vertices = list()
cols = list()
for verts, map_hemi in zip(vertices_from, maps):
vertno_h = _sparse_argmax_nnz_row(map_hemi[verts])
order = np.argsort(vertno_h)
cols.append(cnt + order)
vertices.append(vertno_h[order])
cnt += len(vertno_h)
cols = np.concatenate(cols)
rows = np.arange(len(cols))
data = np.ones(len(cols))
morph_mat = sparse.coo_matrix((data, (rows, cols)),
shape=(len(cols), len(cols))).tocsr()
return vertices, morph_mat
_SOURCE_MORPH_ATTRIBUTES = [ # used in writing
'subject_from', 'subject_to', 'kind', 'zooms', 'niter_affine', 'niter_sdr',
'spacing', 'smooth', 'xhemi', 'morph_mat', 'vertices_to',
'shape', 'affine', 'pre_affine', 'sdr_morph', 'src_data',
'vol_morph_mat', 'verbose']
@fill_doc
class SourceMorph(object):
"""Morph source space data from one subject to another.
.. note:: This class should not be instantiated directly.
Use :func:`mne.compute_source_morph` instead.
Parameters
----------
subject_from : str | None
Name of the subject from which to morph as named in the SUBJECTS_DIR.
subject_to : str | array | list of array
Name of the subject on which to morph as named in the SUBJECTS_DIR.
The default is 'fsaverage'. If morphing a volume source space,
subject_to can be the path to a MRI volume. Can also be a list of
two arrays if morphing to hemisphere surfaces.
kind : str | None
Kind of source estimate. E.g. 'volume' or 'surface'.
zooms : float | tuple
See :func:`mne.compute_source_morph`.
niter_affine : tuple of int
Number of levels (``len(niter_affine)``) and number of
iterations per level - for each successive stage of iterative
refinement - to perform the affine transform.
niter_sdr : tuple of int
Number of levels (``len(niter_sdr)``) and number of
iterations per level - for each successive stage of iterative
refinement - to perform the Symmetric Diffeomorphic Registration (sdr)
transform :footcite:`AvantsEtAl2008`.
spacing : int | list | None
See :func:`mne.compute_source_morph`.
smooth : int | str | None
See :func:`mne.compute_source_morph`.
xhemi : bool
Morph across hemisphere.
morph_mat : scipy.sparse.csr_matrix
The sparse surface morphing matrix for spherical surface
based morphing :footcite:`GreveEtAl2013`.
vertices_to : list of ndarray
The destination surface vertices.
shape : tuple
The volume MRI shape.
affine : ndarray
The volume MRI affine.
pre_affine : instance of dipy.align.AffineMap
The transformation that is applied before the before ``sdr_morph``.
sdr_morph : instance of dipy.align.DiffeomorphicMap
The class that applies the the symmetric diffeomorphic registration
(SDR) morph.
src_data : dict
Additional source data necessary to perform morphing.
vol_morph_mat : scipy.sparse.csr_matrix | None
The volumetric morph matrix, if :meth:`compute_vol_morph_mat`
was used.
%(verbose)s
Notes
-----
.. versionadded:: 0.17
References
----------
.. footbibliography::
"""
def __init__(self, subject_from, subject_to, kind, zooms,
niter_affine, niter_sdr, spacing, smooth, xhemi,
morph_mat, vertices_to, shape,
affine, pre_affine, sdr_morph, src_data,
vol_morph_mat, verbose=None):
# universal
self.subject_from = subject_from
self.subject_to = subject_to
self.kind = kind
# vol input
self.zooms = zooms
self.niter_affine = niter_affine
self.niter_sdr = niter_sdr
# surf input
self.spacing = spacing
self.smooth = smooth
self.xhemi = xhemi
# surf computed
self.morph_mat = morph_mat
# vol computed
self.shape = shape
self.affine = affine
self.sdr_morph = sdr_morph
self.pre_affine = pre_affine
# used by both
self.src_data = src_data
self.vol_morph_mat = vol_morph_mat
self.verbose = verbose
# compute vertices_to here (partly for backward compat and no src
# provided)
if vertices_to is None or len(vertices_to) == 0 and kind == 'volume':
assert src_data['to_vox_map'] is None
vertices_to = self._get_vol_vertices_to_nz()
self.vertices_to = vertices_to
@property
def _vol_vertices_from(self):
assert isinstance(self.src_data['inuse'], list)
vertices_from = [np.where(in_)[0] for in_ in self.src_data['inuse']]
return vertices_from
@property
def _vol_vertices_to(self):
return self.vertices_to[0 if self.kind == 'volume' else 2:]
def _get_vol_vertices_to_nz(self):
logger.info('Computing nonzero vertices after morph ...')
n_vertices = sum(len(v) for v in self._vol_vertices_from)
ones = np.ones((n_vertices, 1))
with use_log_level(False):
return [np.where(self._morph_vols(ones, '', subselect=False))[0]]
@verbose
def apply(self, stc_from, output='stc', mri_resolution=False,
mri_space=None, verbose=None):
"""Morph source space data.
Parameters
----------
stc_from : VolSourceEstimate | VolVectorSourceEstimate | SourceEstimate | VectorSourceEstimate
The source estimate to morph.
output : str
Can be 'stc' (default) or possibly 'nifti1', or 'nifti2'
when working with a volume source space defined on a regular
grid.
mri_resolution : bool | tuple | int | float
If True the image is saved in MRI resolution. Default False.
WARNING: if you have many time points the file produced can be
huge. The default is mri_resolution=False.
mri_space : bool | None
Whether the image to world registration should be in mri space. The
default (None) is mri_space=mri_resolution.
%(verbose_meth)s
Returns
-------
stc_to : VolSourceEstimate | SourceEstimate | VectorSourceEstimate | Nifti1Image | Nifti2Image
The morphed source estimates.
""" # noqa: E501
_validate_type(output, str, 'output')
_validate_type(stc_from, _BaseSourceEstimate, 'stc_from',
'source estimate')
if isinstance(stc_from, _BaseSurfaceSourceEstimate):
allowed_kinds = ('stc',)
extra = 'when stc is a surface source estimate'
else:
allowed_kinds = ('stc', 'nifti1', 'nifti2')
extra = ''
_check_option('output', output, allowed_kinds, extra)
stc = copy.deepcopy(stc_from)
mri_space = mri_resolution if mri_space is None else mri_space
if stc.subject is None:
stc.subject = self.subject_from
if self.subject_from is None:
self.subject_from = stc.subject
if stc.subject != self.subject_from:
raise ValueError('stc_from.subject and '
'morph.subject_from must match. (%s != %s)' %
(stc.subject, self.subject_from))
out = _apply_morph_data(self, stc)
if output != 'stc': # convert to volume
out = _morphed_stc_as_volume(
self, out, mri_resolution=mri_resolution, mri_space=mri_space,
output=output)
return out
@verbose
def compute_vol_morph_mat(self, *, verbose=None):
"""Compute the sparse matrix representation of the volumetric morph.
Parameters
----------
%(verbose_meth)s
Returns
-------
morph : instance of SourceMorph
The instance (modified in-place).
Notes
-----
For a volumetric morph, this will compute the morph for an identity
source volume, i.e., with one source vertex active at a time, and store
the result as a :class:`sparse <scipy.sparse.csr_matrix>`
morphing matrix. This takes a long time (minutes) to compute initially,
but drastically speeds up :meth:`apply` for STCs, so it can be
beneficial when many time points or many morphs (i.e., greater than
the number of volumetric ``src_from`` vertices) will be performed.
When calling :meth:`save`, this sparse morphing matrix is saved with
the instance, so this only needs to be called once. This function does
nothing if the morph matrix has already been computed, or if there is
no volume morphing necessary.
.. versionadded:: 0.22
"""
if self.affine is None or self.vol_morph_mat is not None:
return
logger.info('Computing sparse volumetric morph matrix '
'(will take some time...)')
self.vol_morph_mat = self._morph_vols(None, 'Vertex')
return self
def _morph_vols(self, vols, mesg, subselect=True):
from scipy import sparse
from dipy.align.reslice import reslice
interp = self.src_data['interpolator'].tocsc()[
:, np.concatenate(self._vol_vertices_from)]
n_vols = interp.shape[1] if vols is None else vols.shape[1]
attrs = ('real', 'imag') if np.iscomplexobj(vols) else ('real',)
dtype = np.complex128 if len(attrs) == 2 else np.float64
if vols is None: # sparse -> sparse mode
img_to = (list(), list(), [0]) # data, indices, indptr
assert subselect
else: # dense -> dense mode
img_to = None
if subselect:
vol_verts = np.concatenate(self._vol_vertices_to)
else:
vol_verts = slice(None)
# morph data
from_affine = np.dot(
self.src_data['src_affine_ras'], # mri_ras_t
self.src_data['src_affine_vox']) # vox_mri_t
from_affine[:3] *= 1000.
# equivalent of:
# _resample_from_to(img_real, from_affine,
# (self.pre_affine.codomain_shape,
# (self.pre_affine.codomain_grid2world))
src_shape = self.src_data['src_shape_full'][::-1]
resamp_0 = _grid_interp(
src_shape, self.pre_affine.codomain_shape,
np.linalg.inv(from_affine) @ self.pre_affine.codomain_grid2world)
# reslice to match what was used during the morph
# (brain.mgz and whatever was used to create the source space
# will not necessarily have the same domain/zooms)
# equivalent of:
# pre_affine.transform(img_real)
resamp_1 = _grid_interp(
self.pre_affine.codomain_shape, self.pre_affine.domain_shape,
np.linalg.inv(self.pre_affine.codomain_grid2world) @
self.pre_affine.affine @
self.pre_affine.domain_grid2world)
resamp_0_1 = resamp_1 @ resamp_0
resamp_2 = None
for ii in ProgressBar(list(range(n_vols)), mesg=mesg):
for attr in attrs:
# transform from source space to mri_from resolution/space
if vols is None:
img_real = interp[:, ii]
else:
img_real = interp @ getattr(vols[:, ii], attr)
_debug_img(img_real, from_affine, 'From', src_shape)
img_real = resamp_0_1 @ img_real
if sparse.issparse(img_real):
img_real = img_real.toarray()
img_real = img_real.reshape(
self.pre_affine.domain_shape, order='F')
if self.sdr_morph is not None:
img_real = self.sdr_morph.transform(img_real)
_debug_img(img_real, self.affine, 'From-reslice-transform')
# subselect the correct cube if src_to is provided
if self.src_data['to_vox_map'] is not None:
affine = self.affine
to_zooms = np.diag(self.src_data['to_vox_map'][1])[:3]
# There might be some sparse equivalent to this but
# not sure...
if not np.allclose(self.zooms, to_zooms, atol=1e-3):
img_real, affine = reslice(
img_real, self.affine, self.zooms, to_zooms)
_debug_img(img_real, affine,
'From-reslice-transform-src')
if resamp_2 is None:
resamp_2 = _grid_interp(
img_real.shape, self.src_data['to_vox_map'][0],
np.linalg.inv(affine) @
self.src_data['to_vox_map'][1])
# Equivalent to:
# _resample_from_to(
# img_real, affine, self.src_data['to_vox_map'])
img_real = resamp_2 @ img_real.ravel(order='F')
_debug_img(img_real, self.src_data['to_vox_map'][1],
'From-reslice-transform-src-subselect',
self.src_data['to_vox_map'][0])
# This can be used to help debug, but it really should just
# show the brain filling the volume:
# img_want = np.zeros(np.prod(img_real.shape))
# img_want[np.concatenate(self._vol_vertices_to)] = 1.
# img_want = np.reshape(
# img_want, self.src_data['src_shape'][::-1], order='F')
# _debug_img(img_want, self.src_data['to_vox_map'][1],
# 'To mask')
# raise RuntimeError('Check')
# combine real and complex parts
img_real = img_real.ravel(order='F')[vol_verts]
# initialize output
if img_to is None and vols is not None:
img_to = np.zeros((img_real.size, n_vols), dtype=dtype)
if vols is None:
idx = np.where(img_real)[0]
img_to[0].extend(img_real[idx])
img_to[1].extend(idx)
img_to[2].append(img_to[2][-1] + len(idx))
else:
if attr == 'real':
img_to[:, ii] = img_to[:, ii] + img_real
else:
img_to[:, ii] = img_to[:, ii] + 1j * img_real
if vols is None:
img_to = sparse.csc_matrix(
img_to, shape=(len(vol_verts), n_vols)).tocsr()
return img_to
def __repr__(self): # noqa: D105
s = u"%s" % self.kind
s += u", %s -> %s" % (self.subject_from, self.subject_to)
if self.kind == 'volume':
s += ", zooms : {}".format(self.zooms)
s += ", niter_affine : {}".format(self.niter_affine)
s += ", niter_sdr : {}".format(self.niter_sdr)
elif self.kind in ('surface', 'vector'):
s += ", spacing : {}".format(self.spacing)
s += ", smooth : %s" % self.smooth
s += ", xhemi" if self.xhemi else ""
return "<SourceMorph | %s>" % s
@verbose
def save(self, fname, overwrite=False, verbose=None):
"""Save the morph for source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. '-morph.h5' will be added if fname does
not end with '.h5'.
%(overwrite)s
%(verbose_meth)s
"""
fname = _check_fname(fname, overwrite=overwrite, must_exist=False)
if not fname.endswith('.h5'):
fname = '%s-morph.h5' % fname
out_dict = {k: getattr(self, k) for k in _SOURCE_MORPH_ATTRIBUTES}
for key in ('pre_affine', 'sdr_morph'): # classes
if out_dict[key] is not None:
out_dict[key] = out_dict[key].__dict__
write_hdf5(fname, out_dict, overwrite=overwrite)
_slicers = list()
def _debug_img(data, affine, title, shape=None):
# XXX uncomment these lines for debugging help with volume morph
# import nibabel as nib
# if sparse.issparse(data):
# data = data.toarray()
# data = np.asarray(data)
# if shape is not None:
# data = np.reshape(data, shape, order='F')
# _slicers.append(nib.viewers.OrthoSlicer3D(
# data, affine, axes=None, title=title))
# _slicers[-1].figs[0].suptitle(title, color='r')
return
def _check_zooms(mri_from, zooms, zooms_src_to):
# use voxel size of mri_from
if isinstance(zooms, str) and zooms == 'auto':
zooms = zooms_src_to if zooms_src_to is not None else 5.
if zooms is None:
zooms = mri_from.header.get_zooms()[:3]
zooms = np.atleast_1d(zooms).astype(float)
if zooms.shape == (1,):
zooms = np.repeat(zooms, 3)
if zooms.shape != (3,):
raise ValueError('zooms must be None, a singleton, or have shape (3,),'
' got shape %s' % (zooms.shape,))
zooms = tuple(zooms)
return zooms
def _resample_from_to(img, affine, to_vox_map):
# Wrap to dipy for speed, equivalent to:
# from nibabel.processing import resample_from_to
# from nibabel.spatialimages import SpatialImage
# return _get_img_fdata(
# resample_from_to(SpatialImage(img, affine), to_vox_map, order=1))
import dipy.align.imaffine
return dipy.align.imaffine.AffineMap(
None, to_vox_map[0], to_vox_map[1],
img.shape, affine).transform(img, resample_only=True)
###############################################################################
# I/O
def _check_subject_src(subject, src, name='subject_from', src_name='src'):
if isinstance(src, str):
subject_check = src
elif src is None: # assume it's correct although dangerous but unlikely
subject_check = subject
else:
subject_check = src._subject
if subject_check is None:
warn('The source space does not contain the subject name, we '
'recommend regenerating the source space (and forward / '
'inverse if applicable) for better code reliability')
if subject is None:
subject = subject_check
elif subject_check is not None and subject != subject_check:
raise ValueError('%s does not match %s subject (%s != %s)'
% (name, src_name, subject, subject_check))
if subject is None:
raise ValueError('%s could not be inferred from %s, it must be '
'specified' % (name, src_name))
return subject
def read_source_morph(fname):
"""Load the morph for source estimates from a file.
Parameters
----------
fname : str
Full filename including path.
Returns
-------
source_morph : instance of SourceMorph
The loaded morph.
"""
vals = read_hdf5(fname)
if vals['pre_affine'] is not None: # reconstruct
from dipy.align.imaffine import AffineMap
affine = vals['pre_affine']
vals['pre_affine'] = AffineMap(None)
vals['pre_affine'].__dict__ = affine
if vals['sdr_morph'] is not None:
from dipy.align.imwarp import DiffeomorphicMap
morph = vals['sdr_morph']
vals['sdr_morph'] = DiffeomorphicMap(None, [])
vals['sdr_morph'].__dict__ = morph
# Backward compat with when it used to be a list
if isinstance(vals['vertices_to'], np.ndarray):
vals['vertices_to'] = [vals['vertices_to']]
# Backward compat with when it used to be a single array
if isinstance(vals['src_data'].get('inuse', None), np.ndarray):
vals['src_data']['inuse'] = [vals['src_data']['inuse']]
# added with compute_vol_morph_mat in 0.22:
vals['vol_morph_mat'] = vals.get('vol_morph_mat', None)
return SourceMorph(**vals)
###############################################################################
# Helper functions for SourceMorph methods
def _check_dep(nibabel='2.1.0', dipy='0.10.1'):
"""Check dependencies."""
for lib, ver in zip(['nibabel', 'dipy'],
[nibabel, dipy]):
passed = True if not ver else check_version(lib, ver)
if not passed:
raise ImportError('%s %s or higher must be correctly '
'installed and accessible from Python' % (lib,
ver))
def _morphed_stc_as_volume(morph, stc, mri_resolution, mri_space, output):
"""Return volume source space as Nifti1Image and/or save to disk."""
assert isinstance(stc, _BaseVolSourceEstimate) # should be guaranteed
if stc._data_ndim == 3:
stc = stc.magnitude()
_check_dep(nibabel='2.1.0', dipy=False)
NiftiImage, NiftiHeader = _triage_output(output)
# if MRI resolution is set manually as a single value, convert to tuple
if isinstance(mri_resolution, (int, float)):
# use iso voxel size
new_zooms = (float(mri_resolution),) * 3
elif isinstance(mri_resolution, tuple):
new_zooms = mri_resolution
# if full MRI resolution, compute zooms from shape and MRI zooms
if isinstance(mri_resolution, bool):
new_zooms = _get_zooms_orig(morph) if mri_resolution else None
# create header
hdr = NiftiHeader()
hdr.set_xyzt_units('mm', 'msec')
hdr['pixdim'][4] = 1e3 * stc.tstep
# setup empty volume
if morph.src_data['to_vox_map'] is not None:
shape = morph.src_data['to_vox_map'][0]
affine = morph.src_data['to_vox_map'][1]
else:
shape = morph.shape
affine = morph.affine
assert stc.data.ndim == 2
n_times = stc.data.shape[1]
img = np.zeros((np.prod(shape), n_times))
img[stc.vertices[0], :] = stc.data
img = img.reshape(shape + (n_times,), order='F') # match order='F' above
del shape
# make nifti from data
with warnings.catch_warnings(): # nibabel<->numpy warning
img = NiftiImage(img, affine, header=hdr)
# reslice in case of manually defined voxel size
zooms = morph.zooms[:3]
if new_zooms is not None:
from dipy.align.reslice import reslice
new_zooms = new_zooms[:3]
img, affine = reslice(_get_img_fdata(img),
img.affine, # MRI to world registration
zooms, # old voxel size in mm
new_zooms) # new voxel size in mm
with warnings.catch_warnings(): # nibabel<->numpy warning
img = NiftiImage(img, affine)
zooms = new_zooms
# set zooms in header
img.header.set_zooms(tuple(zooms) + (1,))
return img
def _get_src_data(src, mri_resolution=True):
# copy data to avoid conflicts
_validate_type(
src, (_BaseSurfaceSourceEstimate, 'path-like', SourceSpaces),
'src', 'source space or surface source estimate')
if isinstance(src, _BaseSurfaceSourceEstimate):
src_t = [dict(vertno=src.vertices[0]), dict(vertno=src.vertices[1])]
src_kind = 'surface'
src_subject = src.subject
else:
src_t = _ensure_src(src).copy()
src_kind = src_t.kind
src_subject = src_t._subject
del src
_check_option('src kind', src_kind, ('surface', 'volume', 'mixed'))
# extract all relevant data for volume operations
src_data = dict()
if src_kind in ('volume', 'mixed'):
use_src = src_t[-1]
shape = use_src['shape']
start = 0 if src_kind == 'volume' else 2
for si, s in enumerate(src_t[start:], start):
if s.get('interpolator', None) is None:
if mri_resolution:
raise RuntimeError(
'MRI interpolator not present in src[%d], '
'cannot use mri_resolution=True' % (si,))
interpolator = None
break
else:
interpolator = sum((s['interpolator'] for s in src_t[start:]), 0.)
inuses = [s['inuse'] for s in src_t[start:]]
src_data.update({'src_shape': (shape[2], shape[1], shape[0]), # SAR
'src_affine_vox': use_src['vox_mri_t']['trans'],
'src_affine_src': use_src['src_mri_t']['trans'],
'src_affine_ras': use_src['mri_ras_t']['trans'],
'src_shape_full': ( # SAR
use_src['mri_height'], use_src['mri_depth'],
use_src['mri_width']),
'interpolator': interpolator,
'inuse': inuses,
'to_vox_map': None,
})
if src_kind in ('surface', 'mixed'):
src_data.update(vertices_from=[s['vertno'].copy() for s in src_t[:2]])
# delete copy
return src_data, src_kind, src_subject
def _triage_output(output):
_check_option('output', output, ['nifti', 'nifti1', 'nifti2'])
if output in ('nifti', 'nifti1'):
from nibabel import (Nifti1Image as NiftiImage,
Nifti1Header as NiftiHeader)
else:
assert output == 'nifti2'
from nibabel import (Nifti2Image as NiftiImage,
Nifti2Header as NiftiHeader)
return NiftiImage, NiftiHeader
def _interpolate_data(stc, morph, mri_resolution, mri_space, output):
"""Interpolate source estimate data to MRI."""
_check_dep(nibabel='2.1.0', dipy=False)
NiftiImage, NiftiHeader = _triage_output(output)
_validate_type(stc, _BaseVolSourceEstimate, 'stc',
'volume source estimate')
assert morph.kind in ('volume', 'mixed')
voxel_size_defined = False
if isinstance(mri_resolution, (int, float)) and not isinstance(
mri_resolution, bool):
# use iso voxel size
mri_resolution = (float(mri_resolution),) * 3
if isinstance(mri_resolution, tuple):
_check_dep(nibabel=False, dipy='0.10.1') # nibabel was already checked
from dipy.align.reslice import reslice
voxel_size = mri_resolution
voxel_size_defined = True
mri_resolution = True
# if data wasn't morphed yet - necessary for call of
# stc_unmorphed.as_volume. Since only the shape of src is known, it cannot
# be resliced to a given voxel size without knowing the original.
if isinstance(morph, SourceSpaces):
assert morph.kind in ('volume', 'mixed')
offset = 2 if morph.kind == 'mixed' else 0
if voxel_size_defined:
raise ValueError(
"Cannot infer original voxel size for reslicing... "
"set mri_resolution to boolean value or apply morph first.")
# Now deal with the fact that we may have multiple sub-volumes
inuse = [s['inuse'] for s in morph[offset:]]
src_shape = [s['shape'] for s in morph[offset:]]
assert len(set(map(tuple, src_shape))) == 1
src_subject = morph._subject
morph = BunchConst(src_data=_get_src_data(morph, mri_resolution)[0])
else:
# Make a list as we may have many inuse when using multiple sub-volumes
inuse = morph.src_data['inuse']
src_subject = morph.subject_from
assert isinstance(inuse, list)
if stc.subject is not None:
_check_subject_src(stc.subject, src_subject, 'stc.subject')
n_times = stc.data.shape[1]
shape = morph.src_data['src_shape'][::-1] + (n_times,) # SAR->RAST
dtype = np.complex128 if np.iscomplexobj(stc.data) else np.float64
# order='F' so that F-order flattening is faster
vols = np.zeros((np.prod(shape[:3]), shape[3]), dtype=dtype, order='F')
n_vertices_seen = 0
for this_inuse in inuse:
this_inuse = this_inuse.astype(bool)
n_vertices = np.sum(this_inuse)
stc_slice = slice(n_vertices_seen, n_vertices_seen + n_vertices)
vols[this_inuse] = stc.data[stc_slice]
n_vertices_seen += n_vertices
# use mri resolution as represented in src
if mri_resolution:
if morph.src_data['interpolator'] is None:
raise RuntimeError(
'Cannot morph with mri_resolution when add_interpolator=False '
'was used with setup_volume_source_space')
shape = morph.src_data['src_shape_full'][::-1] + (n_times,)
vols = morph.src_data['interpolator'] @ vols
# reshape back to proper shape
vols = np.reshape(vols, shape, order='F')
# set correct space
if mri_resolution:
affine = morph.src_data['src_affine_vox']
else:
affine = morph.src_data['src_affine_src']
if mri_space:
affine = np.dot(morph.src_data['src_affine_ras'], affine)
affine[:3] *= 1e3
# pre-define header
header = NiftiHeader()
header.set_xyzt_units('mm', 'msec')
header['pixdim'][4] = 1e3 * stc.tstep
# if a specific voxel size was targeted (only possible after morphing)
if voxel_size_defined:
# reslice mri
vols, affine = reslice(
vols, affine, _get_zooms_orig(morph), voxel_size)
with warnings.catch_warnings(): # nibabel<->numpy warning
vols = NiftiImage(vols, affine, header=header)
return vols
###############################################################################
# Morph for VolSourceEstimate
def _compute_r2(a, b):
return 100 * (a.ravel() @ b.ravel()) / \
(np.linalg.norm(a) * np.linalg.norm(b))
def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms):
"""Get a matrix that morphs data from one subject to another."""
with np.testing.suppress_warnings():
from dipy.align import imaffine, imwarp, metrics, transforms
from dipy.align.reslice import reslice
logger.info('Computing nonlinear Symmetric Diffeomorphic Registration...')
# reslice mri_from to zooms
mri_from_orig = mri_from
mri_from, mri_from_affine = reslice(
_get_img_fdata(mri_from_orig), mri_from_orig.affine,
mri_from_orig.header.get_zooms()[:3], zooms)
# reslice mri_to to zooms
mri_to, affine = reslice(
_get_img_fdata(mri_to), mri_to.affine,
mri_to.header.get_zooms()[:3], zooms)
mri_to /= mri_to.max()
mri_from /= mri_from.max() # normalize
# compute center of mass
c_of_mass = imaffine.transform_centers_of_mass(
mri_to, affine, mri_from, mri_from_affine)
# set up Affine Registration
affreg = imaffine.AffineRegistration(
metric=imaffine.MutualInformationMetric(nbins=32),
level_iters=list(niter_affine),
sigmas=[3.0, 1.0, 0.0],
factors=[4, 2, 1])
# translation
logger.info('Optimizing translation:')
with wrapped_stdout(indent=' ', cull_newlines=True):
translation = affreg.optimize(
mri_to, mri_from, transforms.TranslationTransform3D(), None,
affine, mri_from_affine, starting_affine=c_of_mass.affine)
# rigid body transform (translation + rotation)
logger.info('Optimizing rigid-body:')
with wrapped_stdout(indent=' ', cull_newlines=True):
rigid = affreg.optimize(
mri_to, mri_from, transforms.RigidTransform3D(), None,
affine, mri_from_affine, starting_affine=translation.affine)
mri_from_to = rigid.transform(mri_from)
dist = np.linalg.norm(rigid.affine[:3, 3])
angle = np.rad2deg(_angle_between_quats(
np.zeros(3), rot_to_quat(rigid.affine[:3, :3])))
logger.info(f' Translation: {dist:6.1f} mm')
logger.info(f' Rotation: {angle:6.1f}°')
logger.info(f' R²: {_compute_r2(mri_to, mri_from_to):6.1f}%')
# affine transform (translation + rotation + scaling)
logger.info('Optimizing full affine:')
with wrapped_stdout(indent=' ', cull_newlines=True):
pre_affine = affreg.optimize(
mri_to, mri_from, transforms.AffineTransform3D(), None,
affine, mri_from_affine, starting_affine=rigid.affine)
mri_from_to = pre_affine.transform(mri_from)
logger.info(f' R²: {_compute_r2(mri_to, mri_from_to):6.1f}%')
# SDR
shape = tuple(pre_affine.domain_shape)
if len(niter_sdr):
sdr = imwarp.SymmetricDiffeomorphicRegistration(
metrics.CCMetric(3), list(niter_sdr))
logger.info('Optimizing SDR:')
with wrapped_stdout(indent=' ', cull_newlines=True):
sdr_morph = sdr.optimize(mri_to, pre_affine.transform(mri_from))
assert shape == tuple(sdr_morph.domain_shape) # should be tuple of int
mri_from_to = sdr_morph.transform(mri_from_to)
else:
sdr_morph = None
logger.info(f' R²: {_compute_r2(mri_to, mri_from_to):6.1f}%')
_debug_img(mri_from_orig.dataobj, mri_from_orig.affine, 'From')
_debug_img(mri_from, affine, 'From-reslice')
_debug_img(mri_from_to, affine, 'From-reslice')
_debug_img(mri_to, affine, 'To-reslice')
return shape, zooms, affine, pre_affine, sdr_morph
def _compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
smooth=None, subjects_dir=None, warn=True,
xhemi=False):
"""Compute morph matrix."""
from scipy import sparse
logger.info('Computing morph matrix...')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir, xhemi)
# morph the data
morpher = []
for hemi_to in range(2): # iterate over to / block-rows of CSR matrix
hemi_from = (1 - hemi_to) if xhemi else hemi_to
morpher.append(_hemi_morph(
tris[hemi_from], vertices_to[hemi_to], vertices_from[hemi_from],
smooth, maps[hemi_from], warn))
shape = (sum(len(v) for v in vertices_to),
sum(len(v) for v in vertices_from))
data = [m.data for m in morpher]
indices = [m.indices.copy() for m in morpher]
indptr = [m.indptr.copy() for m in morpher]
# column indices need to be adjusted
indices[0 if xhemi else 1] += len(vertices_from[0])
indices = np.concatenate(indices)
# row index pointers need to be adjusted
indptr[1] = indptr[1][1:] + len(data[0])
indptr = np.concatenate(indptr)
# data does not need to be adjusted
data = np.concatenate(data)
# this is equivalent to morpher = sparse_block_diag(morpher).tocsr(),
# but works for xhemi mode
morpher = sparse.csr_matrix((data, indices, indptr), shape=shape)
logger.info('[done]')
return morpher
def _hemi_morph(tris, vertices_to, vertices_from, smooth, maps, warn):
from scipy import sparse
if len(vertices_from) == 0:
return sparse.csr_matrix((len(vertices_to), 0))
e = mesh_edges(tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices)
if isinstance(smooth, str):
_check_option('smooth', smooth, ('nearest',),
extra=' when used as a string.')
mm = _surf_nearest(vertices_from, e).tocsr()
else:
mm = _surf_upsampling_mat(vertices_from, e, smooth, warn=warn)
assert mm.shape == (n_vertices, len(vertices_from))
if maps is not None:
mm = maps[vertices_to] * mm
else: # to == from
mm = mm[vertices_to]
assert mm.shape == (len(vertices_to), len(vertices_from))
return mm
@verbose
def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
verbose=None):
"""Convert a grade to source space vertices for a given subject.
Parameters
----------
subject : str
Name of the subject.
grade : int | list
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
then values will be morphed to the set of vertices specified in
in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
%(subjects_dir)s
%(n_jobs)s
%(verbose)s
Returns
-------
vertices : list of array of int
Vertex numbers for LH and RH.
"""
_validate_type(grade, (list, 'int-like', None), 'grade')
# add special case for fsaverage for speed
if subject == 'fsaverage' and isinstance(grade, int) and grade == 5:
return [np.arange(10242), np.arange(10242)]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
spheres_to = [op.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
lhs, rhs = [read_surface(s)[0] for s in spheres_to]
if grade is not None: # fill a subset of vertices
if isinstance(grade, list):
if not len(grade) == 2:
raise ValueError('grade as a list must have two elements '
'(arrays of output vertices)')
vertices = grade
else:
grade = _ensure_int(grade)
# find which vertices to use in "to mesh"
ico = _get_ico_tris(grade, return_surf=True)
lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
# Compute nearest vertices in high dim mesh
parallel, my_compute_nearest, _ = \
parallel_func(_compute_nearest, n_jobs)
lhs, rhs, rr = [a.astype(np.float32)
for a in [lhs, rhs, ico['rr']]]
vertices = parallel(my_compute_nearest(xhs, rr)
for xhs in [lhs, rhs])
# Make sure the vertices are ordered
vertices = [np.sort(verts) for verts in vertices]
for verts in vertices:
if (np.diff(verts) == 0).any():
raise ValueError(
'Cannot use icosahedral grade %s with subject %s, '
'mapping %s vertices onto the high-resolution mesh '
'yields repeated vertices, use a lower grade or a '
'list of vertices from an existing source space'
% (grade, subject, len(verts)))
else: # potentially fill the surface
vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
return vertices
def _surf_nearest(vertices, adj_mat):
from scipy import sparse
from scipy.sparse.csgraph import dijkstra
if not check_version('scipy', '1.3'):
raise ValueError('scipy >= 1.3 is required to use nearest smoothing, '
'consider upgrading SciPy or using a different '
'smoothing value')
# Vertices can be out of order, so sort them to start ...
order = np.argsort(vertices)
vertices = vertices[order]
_, _, sources = dijkstra(adj_mat, False, indices=vertices, min_only=True,
return_predecessors=True)
col = np.searchsorted(vertices, sources)
# ... then get things back to the correct configuration.
col = order[col]
row = np.arange(len(col))
data = np.ones(len(col))
mat = sparse.coo_matrix((data, (row, col)))
assert mat.shape == (adj_mat.shape[0], len(vertices)), mat.shape
return mat
def _csr_row_norm(data, row_norm):
assert row_norm.shape == (data.shape[0],)
data.data /= np.where(row_norm, row_norm, 1).repeat(np.diff(data.indptr))
def _surf_upsampling_mat(idx_from, e, smooth, warn=True):
"""Upsample data on a subject's surface given mesh edges."""
# we're in CSR format and it's to==from
from scipy import sparse
assert isinstance(e, sparse.csr_matrix)
n_tot = e.shape[0]
assert e.shape == (n_tot, n_tot)
# our output matrix starts out as a smaller matrix, and will gradually
# increase in size
data = sparse.eye(len(idx_from), format='csr')
_validate_type(smooth, ('int-like', str, None), 'smoothing steps')
if smooth is not None: # number of steps
smooth = _ensure_int(smooth, 'smoothing steps')
if smooth < 1:
raise ValueError(
'The number of smoothing operations has to be at least 1, got '
f'{smooth}')
smooth = smooth - 1
# idx will gradually expand from idx_from -> np.arange(n_tot)
idx = idx_from
recompute_idx_sum = True # always compute at least once
mult = np.zeros(n_tot)
for k in range(100): # the maximum allowed
# on first iteration it's already restricted, so we need to re-restrict
if k != 0 and len(idx) < n_tot:
data = data[idx]
# smoothing multiplication
use_e = e[:, idx] if len(idx) < n_tot else e
data = use_e * data
del use_e
# compute row sums + output indices
if recompute_idx_sum:
if len(idx) == n_tot:
row_sum = np.asarray(e.sum(-1))[:, 0]
idx = np.arange(n_tot)
recompute_idx_sum = False
else:
mult[idx] = 1
row_sum = e * mult
idx = np.where(row_sum)[0]
# do row normalization
_csr_row_norm(data, row_sum)
if k == smooth or (smooth is None and len(idx) == n_tot):
break # last iteration / done
assert data.shape == (n_tot, len(idx_from))
if len(idx) != n_tot and warn:
warn_(f'{n_tot-len(idx)}/{n_tot} vertices not included in smoothing, '
'consider increasing the number of steps')
logger.info(f' {k + 1} smooth iterations done.')
return data
def _sparse_argmax_nnz_row(csr_mat):
"""Return index of the maximum non-zero index in each row."""
n_rows = csr_mat.shape[0]
idx = np.empty(n_rows, dtype=np.int64)
for k in range(n_rows):
row = csr_mat[k].tocoo()
idx[k] = row.col[np.argmax(row.data)]
return idx
def _get_subject_sphere_tris(subject, subjects_dir):
spheres = [op.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
tris = [read_surface(s)[1] for s in spheres]
return tris
###############################################################################
# Apply morph to source estimate
def _get_zooms_orig(morph):
"""Compute src zooms from morph zooms, morph shape and src shape."""
# zooms_to = zooms_from / shape_to * shape_from for each spatial dimension
return [mz / ss * ms for mz, ms, ss in
zip(morph.zooms, morph.shape,
morph.src_data['src_shape_full'][::-1])]
def _check_vertices_match(v1, v2, name):
if not np.array_equal(v1, v2):
ext = ''
if np.in1d(v2, v1).all():
ext = ' Vertices were likely excluded during forward computation.'
raise ValueError(
'vertices do not match between morph (%s) and stc (%s) for %s:\n%s'
'\n%s\nPerhaps src_to=fwd["src"] needs to be passed when calling '
'compute_source_morph.%s' % (len(v1), len(v2), name, v1, v2, ext))
_VOL_MAT_CHECK_RATIO = 1.
def _apply_morph_data(morph, stc_from):
"""Morph a source estimate from one subject to another."""
if stc_from.subject is not None and stc_from.subject != morph.subject_from:
raise ValueError('stc.subject (%s) != morph.subject_from (%s)'
% (stc_from.subject, morph.subject_from))
_check_option('morph.kind', morph.kind, ('surface', 'volume', 'mixed'))
if morph.kind == 'surface':
_validate_type(stc_from, _BaseSurfaceSourceEstimate, 'stc_from',
'volume source estimate when using a surface morph')
elif morph.kind == 'volume':
_validate_type(stc_from, _BaseVolSourceEstimate, 'stc_from',
'surface source estimate when using a volume morph')
else:
assert morph.kind == 'mixed' # can handle any
_validate_type(stc_from, _BaseSourceEstimate, 'stc_from',
'source estimate when using a mixed source morph')
# figure out what to actually morph
do_vol = not isinstance(stc_from, _BaseSurfaceSourceEstimate)
do_surf = not isinstance(stc_from, _BaseVolSourceEstimate)
vol_src_offset = 2 if do_surf else 0
from_surf_stop = sum(len(v) for v in stc_from.vertices[:vol_src_offset])
to_surf_stop = sum(len(v) for v in morph.vertices_to[:vol_src_offset])
from_vol_stop = stc_from.data.shape[0]
vertices_to = morph.vertices_to
if morph.kind == 'mixed':
vertices_to = vertices_to[0 if do_surf else 2:None if do_vol else 2]
to_vol_stop = sum(len(v) for v in vertices_to)
mesg = 'Ori × Time' if stc_from.data.ndim == 3 else 'Time'
data_from = np.reshape(stc_from.data, (stc_from.data.shape[0], -1))
n_times = data_from.shape[1] # oris treated as times
data = np.empty((to_vol_stop, n_times), stc_from.data.dtype)
to_used = np.zeros(data.shape[0], bool)
from_used = np.zeros(data_from.shape[0], bool)
if do_vol:
stc_from_vertices = stc_from.vertices[vol_src_offset:]
vertices_from = morph._vol_vertices_from
for ii, (v1, v2) in enumerate(zip(vertices_from, stc_from_vertices)):
_check_vertices_match(v1, v2, 'volume[%d]' % (ii,))
from_sl = slice(from_surf_stop, from_vol_stop)
assert not from_used[from_sl].any()
from_used[from_sl] = True
to_sl = slice(to_surf_stop, to_vol_stop)
assert not to_used[to_sl].any()
to_used[to_sl] = True
# Loop over time points to save memory
if morph.vol_morph_mat is None and \
n_times >= _VOL_MAT_CHECK_RATIO * (to_vol_stop - to_surf_stop):
warn('Computing a sparse volume morph matrix will save time over '
'directly morphing, calling morph.compute_vol_morph_mat(). '
'Consider (re-)saving your instance to disk to avoid '
'subsequent recomputation.')
morph.compute_vol_morph_mat()
if morph.vol_morph_mat is None:
logger.debug('Using individual volume morph')
data[to_sl, :] = morph._morph_vols(data_from[from_sl], mesg)
else:
logger.debug('Using sparse volume morph matrix')
data[to_sl, :] = morph.vol_morph_mat @ data_from[from_sl]
if do_surf:
for hemi, v1, v2 in zip(('left', 'right'),
morph.src_data['vertices_from'],
stc_from.vertices[:2]):
_check_vertices_match(v1, v2, '%s hemisphere' % (hemi,))
from_sl = slice(0, from_surf_stop)
assert not from_used[from_sl].any()
from_used[from_sl] = True
to_sl = slice(0, to_surf_stop)
assert not to_used[to_sl].any()
to_used[to_sl] = True
data[to_sl] = morph.morph_mat * data_from[from_sl]
assert to_used.all()
assert from_used.all()
data.shape = (data.shape[0],) + stc_from.data.shape[1:]
klass = stc_from.__class__
stc_to = klass(data, vertices_to, stc_from.tmin, stc_from.tstep,
morph.subject_to)
return stc_to
| {
"content_hash": "c03bc324d58b9ee386f4995d034817f2",
"timestamp": "",
"source": "github",
"line_count": 1426,
"max_line_length": 102,
"avg_line_length": 41.90953716690042,
"alnum_prop": 0.5855964392684437,
"repo_name": "kambysese/mne-python",
"id": "9dc40425d307d2a1e4a6209316534bc02b9dd1c6",
"size": "59964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/morph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3912"
},
{
"name": "Python",
"bytes": "5978369"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import os
from flask import Flask, render_template, request, flash
from cookbook import Brewery, Beer
import boto3
from botocore.exceptions import ClientError
app = Flask(__name__)
app.secret_key = os.urandom(24)
table_name= 'dev-brewblog'
dynamo_control = boto3.client('dynamodb')
dynamo_data = boto3.resource('dynamodb', region_name='us-east-1')
beer_table = dynamo_data.Table(table_name)
brewery = Brewery()
@app.route('/')
@app.route('/index/')
@app.route('/beer/')
def index():
full_beer_data = beer_table.scan()
for x in range(0, (int(full_beer_data['Count']))):
beer = full_beer_data['Items'][x]
print beer['gravity']
brewery_query = brewery.find_beer(BeerName=beer['beer_name'])
print brewery_query
if brewery_query == 'NotFound':
brewery.with_beer(b=Beer(Style=beer['beer_style'],
Abv=beer['abv'],
Ibu=beer['ibu'],
Gravity=beer['gravity'],
Name=beer['beer_name']
)
)
return render_template('index.html', list=brewery.beers, title='BrewBlog')
@app.route('/beer/<beer_name>/')
def show_page_details(beer_name):
beer = brewery.find_beer(BeerName=beer_name)
if beer != 'NotFound':
return render_template('beer_detail.html', item=beer)
return render_template('error_messages/beer_not_found.html')
@app.route('/beer/add/', methods=['GET'])
def add_beer():
return render_template('forms/new_beer.html')
@app.route('/beer/add/', methods=['POST'])
def add_beer_post():
response = None
try:
table_metadata = dynamo_control.describe_table(TableName=table_name)
response = beer_table.put_item(
Item={
'beer_id': (int(table_metadata['Table']['ItemCount']) + 1),
'abv': request.form['beerAbv'],
'beer_name': request.form['beerName'],
'beer_style': request.form['beerStyle'],
'gravity': request.form['beerGravity'],
'ibu': request.form['beerIbu']
}
)
print (int(table_metadata['Table']['ItemCount']) + 1)
except ClientError as e:
flash(e.response['Error']['Message'], 'error')
if response is not None:
flash("Beer Has been added", 'success')
return render_template('forms/new_beer.html')
if __name__ == "__main__":
app.testing = False
app.run(debug=True, host='0.0.0.0')
| {
"content_hash": "9f3a012f0d9c86e69703eb932e1c8ee7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 33.89473684210526,
"alnum_prop": 0.5690993788819876,
"repo_name": "stewmi/brew-blog",
"id": "f1ae654a00fd21eef725f150e796ff052e887343",
"size": "2576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151830"
},
{
"name": "HTML",
"bytes": "4118"
},
{
"name": "Python",
"bytes": "6598"
}
],
"symlink_target": ""
} |
import pyos
import urllib2
from urllib import urlretrieve
from apps.pman.fuzzywuzzy import fuzz
from datetime import timedelta
import time
"""
PMan Specific app.json parameters.
Specified under the key "pman".
{
"pman": {
"depends": [] - A list of packages on which the app depends.
"min_os": 1.0 - A float representing the minimum system version supported.
"onInstalled": <method> - A method located in your app's module that will be called (with no arguments) after the app is installed.
}
}
"""
REPOS = []
def loadRepos():
global REPOS
REPOS = fetchJSON("apps/pman/repos.json")["repos"]
def download(url, to):
try:
urlretrieve(url, to)
return True
except:
return False
def onStart(s, a):
global state, app, pman, cache
state = s
app = a
cache = Cache()
pman = PackageManager()
pman.openScreen(MainScreen())
def internet_on():
try:
urllib2.urlopen('http://www.google.com/', timeout=5)
return True
except urllib2.URLError: pass
return False
def fetchJSON(url):
try:
resource = None
if pyos.os.path.exists(url):
resource = open(url, "rU")
else:
resource = urllib2.urlopen(url)
text = str(unicode(resource.read(), errors="ignore"))
resource.close()
return pyos.json.loads(text)
except:
return None
def readJSON(path):
try:
f = open(path, "rU")
jsd = pyos.json.loads(str(unicode(f.read(), errors="ignore")))
f.close()
return jsd
except:
return {"apps": ""}
class ProgressDialog(pyos.GUI.Overlay):
def __init__(self):
super(ProgressDialog, self).__init__((0, 0), width=app.ui.width, height=app.ui.height, color=state.getColorPalette().getColor("item"))
self.container.border = 1
self.container.borderColor = state.getColorPalette().getColor("background")
self.title = pyos.GUI.Text((2, 2), "PMan is working...", state.getColorPalette().getColor("background"), 16)
self.text = pyos.GUI.ExpandingMultiLineText((0, 0), "Stand by.", state.getColorPalette().getColor("background"), 16,
width=self.width, height=self.height-60)
self.tsc = pyos.GUI.TextScrollableContainer((0, 20), self.text, width=self.width, height=self.height-60,
color=state.getColorPalette().getColor("item"))
self.clsbtn = pyos.GUI.Button((0, self.height-40), "Hide Progress", width=self.width, height=40,
onClick=self.hide)
self.notification = None
self.addChild(self.title)
self.addChild(self.tsc)
self.addChild(self.clsbtn)
def update(self, text):
self.text.setText(text + "\n" + self.text.text)
self.tsc.refresh()
if self.notification != None:
self.notification.text = text
def hide(self, done=False):
if self.displayed:
super(ProgressDialog, self).hide()
if not done:
self.notification = pyos.PermanentNotification("PMan Status", self.text.text, image=app.getIcon())
state.getNotificationQueue().push(self.notification)
class AppIcon(pyos.GUI.Image):
def __init__(self, position, appname, w=40, h=40, **data):
if appname in state.getApplicationList().applications.values():
super(AppIcon, self).__init__(position, surface=state.getApplicationList().getApp(appname).getIcon(), width=w, height=h, **data)
else:
icn = cache.get(appname).get("more", {}).get("icon", "unknown")
if icn in state.getIcons().icons.values():
super(AppIcon, self).__init__(position, path="res/icons/"+icn, width=w, height=h, **data)
else:
if pyos.os.path.exists("temp/pman_"+appname+"_icon.png") or download(cache.get(appname)["remotePath"]+"icon.png", "temp/pman_"+appname+"_icon.png"):
try:
super(AppIcon, self).__init__(position, path="temp/pman_"+appname+"_icon.png", width=w, height=h, **data)
except:
super(AppIcon, self).__init__(position, surface=state.getIcons().getLoadedIcon("unknown"), width=w, height=h, **data)
class AppActionButton(pyos.GUI.Button):
def __init__(self, position, appname, w, h):
self.app = appname
super(AppActionButton, self).__init__(position, "", width=w, height=h)
self.refresh()
def refresh(self):
if self.app in state.getApplicationList().applications.keys():
self.backgroundColor = (200, 250, 200)
if float(cache.get(self.app).get("version", 0.0)) > state.getApplicationList().getApp(self.app).version:
self.setOnClick(Installer(self.app).start)
self.setText("Update")
else:
self.setOnClick(state.getApplicationList().getApp(self.app).activate)
self.setText("Open")
else:
self.backgroundColor = (200, 200, 250)
self.setOnClick(Installer(self.app).start)
self.setText("Install")
super(AppActionButton, self).refresh()
class UIParts:
@staticmethod
def smallAppEntry(appname, onC, fits="appui"):
if fits == "appui": fits = app.ui
cont = pyos.GUI.Container((0, 0), width=fits.computedWidth, height=20, border=1, onClick=onC)
cont.addChild(AppIcon((0, 0), appname, 20, 20, onClick=onC))
cont.addChild(pyos.GUI.Text((22, 2), cache.get(appname)["title"], pyos.DEFAULT, 16, onClick=onC))
cont.addChild(AppActionButton((cont.computedWidth-40, 0), appname, 40, 20))
return cont
@staticmethod
def normalAppEntry(appname, onC, fits="appui"):
if fits == "appui": fits = app.ui
cont = pyos.GUI.Container((0, 0), width=fits.computedWidth, height=40, border=1, onClick=onC)
cont.addChild(AppIcon((0, 0), appname, 40, 40, onClick=onC))
cont.addChild(pyos.GUI.Text((42, 2), cache.get(appname)["title"], pyos.DEFAULT, 18, onClick=onC))
cont.addChild(pyos.GUI.Text((42, 22), cache.get(appname)["author"], pyos.DEFAULT, 14, onClick=onC))
cont.addChild(AppActionButton((cont.computedWidth-60, 0), appname, 60, 40))
return cont
@staticmethod
def largeAppEntry(appname, onC, fits="appui"):
if fits == "appui": fits = app.ui
cont = pyos.GUI.Container((0, 0), width=fits.computedWidth, height=64, border=1, onClick=onC)
cont.addChild(AppIcon((0, 0), appname, 40, 40, onClick=onC))
cont.addChild(pyos.GUI.Text((42, 2), cache.get(appname)["title"], pyos.DEFAULT, 18, onClick=onC))
cont.addChild(pyos.GUI.Text((42, 22), cache.get(appname)["author"], pyos.DEFAULT, 14, onClick=onC))
cont.addChild(AppActionButton((cont.computedWidth-60, 0), appname, 60, 40))
dt = cache.get(appname).get("description", "No Description.")
cont.addChild(pyos.GUI.MultiLineText((2, 40), dt[:dt.find(".")], pyos.DEFAULT, 12, width=cont.computedWidth, height=24))
return cont
class SizeSelector(pyos.GUI.Selector):
def __init__(self, position, w, h, oVc):
self.size = app.dataStore.get("sel_size", "Small")
self.choices = ["Small", "Normal", "Detailed"]
super(SizeSelector, self).__init__(position, self.getChoices(), width=w, height=h,
onValueChanged=oVc)
def getChoices(self):
lc = self.choices[:]
lc.remove(self.size)
return [self.size] + lc
def getEntry(self, appname, onC, fits="appui"):
val = self.size
if val == "Small": return UIParts.smallAppEntry(appname, onC, fits)
if val == "Normal": return UIParts.normalAppEntry(appname, onC, fits)
if val == "Detailed": return UIParts.largeAppEntry(appname, onC, fits)
class BackBtn(pyos.GUI.Image):
def __init__(self, position):
super(BackBtn, self).__init__(position, surface=state.getIcons().getLoadedIcon("back"),
onClick=pman.closeLast)
class Screen(pyos.GUI.Container):
def __init__(self, name):
self.name = name
super(Screen, self).__init__((0, 0), width=app.ui.width, height=app.ui.height)
def activate(self):
if self not in pman.screens:
pman.openScreen(self)
return
self.oldtitle = state.getFunctionBar().app_title_text.text
state.getFunctionBar().app_title_text.setText(self.name)
app.ui.clearChildren()
app.ui.addChild(self)
def deactivate(self):
state.getFunctionBar().app_title_text.setText(self.oldtitle)
class AppScreen(Screen):
def __init__(self, appname):
self.appname = appname
super(AppScreen, self).__init__(cache.get(appname).get("title", appname))
self.refresh()
def refresh(self):
self.clearChildren()
self.data = cache.get(self.appname)
self.addChild(UIParts.normalAppEntry(self.appname, pyos.Application.dummy))
self.addChild(pyos.GUI.Text((2, 42), "Package: "+self.appname))
self.addChild(pyos.GUI.Text((2, 58), "Version "+str(self.data.get("version", 0.0))))
if self.appname in app.dataStore.get("featured", []): self.addChild(pyos.GUI.Text((2, 74), "Featured", (250, 150, 150)))
self.addChild(pyos.GUI.MultiLineText((2, 90), self.data.get("description", "No Description"), width=app.ui.width, height=(app.ui.height-130)))
self.addChild(BackBtn((0, self.height-40)))
self.addChild(pyos.GUI.Button((40, self.height-40), "More by "+self.data.get("author"),
state.getColorPalette().getColor("dark:background"), width=app.ui.width-40, height=40,
onClick=AppListScreen.ondemand,
onClickData=([a for a in cache.data.keys() if cache.get(a).get("author") == self.data.get("author")],)))
class UpdateScreen(Screen):
def __init__(self):
super(UpdateScreen, self).__init__("Updates")
self.refresh()
@staticmethod
def ondemand():
UpdateScreen().activate()
def bgLoad(self, sel=None):
if sel != None: app.dataStore["sel_size"] = sel
self.removeChild(self.sizesel)
self.sizesel = SizeSelector((app.ui.width-100, 0), 100, 40, self.bgLoad)
self.addChild(self.sizesel)
self.scroller.clearChildren()
txt = pyos.GUI.Text((0, 0), "Loading...")
self.scroller.addChild(txt)
au = 0
for lapp in sorted(state.getApplicationList().getApplicationList(), key=lambda x: x.title):
if cache.get(lapp.name).get("version", 0.0) > lapp.version:
self.scroller.addChild(self.sizesel.getEntry(lapp.name, AppScreen(lapp.name).activate, self.scroller.container))
au += 1
self.statustxt.setText(str(au)+" Updates")
self.scroller.removeChild(txt)
def refresh(self):
self.clearChildren()
self.scroller = pyos.GUI.ListScrollableContainer((0, 40), width=app.ui.width, height=app.ui.height-40)
self.statustxt = pyos.GUI.Text((42, 11), "0 Updates", pyos.DEFAULT, 18)
self.back = BackBtn((0, 0))
self.sizesel = SizeSelector((app.ui.width-100, 0), 100, 40, self.bgLoad)
self.addChildren(self.scroller, self.statustxt, self.back, self.sizesel)
state.getThreadController().addThread(pyos.ParallelTask(self.bgLoad))
class AppListScreen(Screen):
def __init__(self, apps):
self.apps = apps
super(AppListScreen, self).__init__("Apps")
self.refresh()
@staticmethod
def ondemand(apps):
AppListScreen(apps).activate()
def bgLoad(self, sel=None):
if sel != None: app.dataStore["sel_size"] = sel
self.removeChild(self.sizesel)
self.sizesel = SizeSelector((app.ui.width-100, 0), 100, 40, self.bgLoad)
self.addChild(self.sizesel)
self.scroller.clearChildren()
txt = pyos.GUI.Text((0, 0), "Loading...")
self.scroller.addChild(txt)
au = 0
for a in sorted(self.apps, key=lambda x: cache.get(x, {"title": x}).get("title")):
self.scroller.addChild(self.sizesel.getEntry(a, AppScreen(a).activate, self.scroller.container))
au += 1
self.statustxt.setText(str(au)+" Apps")
self.scroller.removeChild(txt)
def refresh(self):
self.clearChildren()
self.scroller = pyos.GUI.ListScrollableContainer((0, 40), width=app.ui.width, height=app.ui.height-40)
self.statustxt = pyos.GUI.Text((42, 11), "0 Apps", pyos.DEFAULT, 18)
self.back = BackBtn((0, 0))
self.sizesel = SizeSelector((app.ui.width-100, 0), 100, 40, self.bgLoad)
self.addChildren(self.scroller, self.statustxt, self.back, self.sizesel)
state.getThreadController().addThread(pyos.ParallelTask(self.bgLoad))
class SearchScreen(Screen):
def __init__(self, query):
self.query = query.lower()
super(SearchScreen, self).__init__("Search Results")
self.refresh()
@staticmethod
def ondemand(query):
SearchScreen(query).activate()
def bgLoad(self, sel=None):
if sel != None: app.dataStore["sel_size"] = sel
self.removeChild(self.sizesel)
self.sizesel = SizeSelector((app.ui.width-80, 0), 80, 40, self.bgLoad)
self.addChild(self.sizesel)
self.scroller.clearChildren()
txt = pyos.GUI.Text((0, 0), "Loading...")
self.scroller.addChild(txt)
results = {}
for a in cache.data.keys():
r = fuzz.ratio(self.query, a)
r += fuzz.ratio(self.query, cache.get(a).get("title").lower())
r += fuzz.token_sort_ratio(self.query, cache.get(a).get("description", "").lower())
ar = fuzz.ratio(self.query, cache.get(a).get("author", "").lower())
if r >110 or ar > 60:
results[a] = r+ar
for ra in sorted(results.keys(), key=lambda x: results[x], reverse=True):
self.scroller.addChild(self.sizesel.getEntry(ra, AppScreen(ra).activate, self.scroller.container))
self.scroller.removeChild(txt)
def setQuery(self):
self.query = self.statustxt.getText().lower()
pman.refresh()
def refresh(self):
self.clearChildren()
self.scroller = pyos.GUI.ListScrollableContainer((0, 40), width=app.ui.width, height=app.ui.height-40)
self.statustxt = pyos.GUI.TextEntryField((40, 0), self.query, width=self.width-160, height=40)
self.submitbtn = pyos.GUI.Image((self.width-120, 0), surface=state.getIcons().getLoadedIcon("search"),
onClick=self.setQuery)
self.back = BackBtn((0, 0))
self.sizesel = SizeSelector((app.ui.width-80, 0), 80, 40, self.bgLoad)
self.addChildren(self.scroller, self.statustxt, self.submitbtn, self.back, self.sizesel)
state.getThreadController().addThread(pyos.ParallelTask(self.bgLoad))
class MainScreen(Screen):
def __init__(self):
super(MainScreen, self).__init__("Apps")
self.refresh()
def refresh(self):
self.clearChildren()
if internet_on():
self.addChild(pyos.GUI.Button((40, self.height-80), "Update Database", (107, 148, 103), width=self.width-40, height=40,
onClick=cache.bgUpdate))
self.addChild(pyos.GUI.Image((0, self.height-80), surface=state.getIcons().getLoadedIcon("save"),
onClick=cache.bgUpdate))
self.featuredHerald = pyos.GUI.Container((0, 0), width=self.width, height=20, color=(51, 183, 255))
self.featuredHerald.addChild(pyos.GUI.Text((2, 2), "Featured Apps", pyos.DEFAULT, 16))
self.featuredShowcase = pyos.GUI.ListPagedContainer((0, 20), width=self.width, height=90,
border=1, borderColor=(51, 183, 255))
if app.dataStore.get("featured", []) == []:
self.featuredShowcase.addChild(pyos.GUI.Text((5, 5), "No Featured Apps."))
else:
for fa in app.dataStore.get("featured", []):
self.featuredShowcase.addChild(UIParts.largeAppEntry(fa, AppScreen(fa).activate))
self.featuredShowcase.goToPage()
self.addChild(self.featuredHerald)
self.addChild(self.featuredShowcase)
self.addChild(pyos.GUI.Button((0, self.height-120), "Updates", (255, 187, 59), width=self.width/2, height=40,
onClick=UpdateScreen.ondemand))
self.addChild(pyos.GUI.Button((self.width/2, self.height-120), "All Apps", (148, 143, 133), width=self.width/2, height=40,
onClick=AppListScreen.ondemand, onClickData=(cache.data.keys(),)))
self.searchBar = pyos.GUI.TextEntryField((0, self.height-160), "", width=self.width-40, height=40)
self.addChild(pyos.GUI.Image((self.width-40, self.height-160), surface=state.getIcons().getLoadedIcon("search"),
onClick=self.search))
self.addChild(self.searchBar)
self.addChild(pyos.GUI.Image((0, self.height-40), surface=state.getIcons().getLoadedIcon("open"),
onClick=Installer.localAsk))
self.addChild(pyos.GUI.Button((40, self.height-40), "Install from File", (194, 89, 19), width=self.width-40, height=40,
onClick=Installer.localAsk))
def search(self):
SearchScreen.ondemand(self.searchBar.getText())
class Cache(pyos.DataStore):
def __init__(self, doDialog=True):
self.dsPath = "apps/pman/cache.json"
self.application = app
self.featured = []
self.progressInfo = "Updating Cache"
self.dialog = None if not doDialog else ProgressDialog()
self.data = {}
def setPrgInfo(self, txt):
print txt
self.progressInfo = txt
if self.dialog != None:
self.dialog.update(txt)
if txt == "Done.":
self.dialog.hide(True)
self.dialog = ProgressDialog()
def update(self):
self.data = {}
self.featured = []
if self.dialog != None:
self.dialog.display()
self.saveStore()
cr = 0
for repo in REPOS:
cr += 1
self.setPrgInfo("R("+str(cr)+"): "+repo)
if download(repo+"/apps.json", "temp/apps.json"):
ca = 0
rman = readJSON("temp/apps.json")
for rapp in rman["apps"]:
ca += 1
self.setPrgInfo("R("+str(cr)+") A("+str(ca)+"): "+rapp)
if download(repo+"/"+rman["apps_dir"]+"/"+rapp+"/app.json", "temp/app.json"):
aman = readJSON("temp/app.json")
aman["remotePath"] = repo+"/"+rman["apps_dir"]+"/"+rapp+"/"
self.set(rapp, aman)
finf = rman.get("featured", None)
if finf != None:
if isinstance(finf, basestring):
self.featured.append(rman["featured"])
else:
for f in rman["featured"]: self.featured.append(f)
for f in rman.get("featured_list", []): self.featured.append(f)
self.setPrgInfo("Cleaning Up...")
try:
pyos.os.remove("temp/apps.json")
pyos.os.remove("temp/app.json")
for trc in pyos.os.listdir("temp/"):
if trc.startswith("pman_"):
pyos.os.remove("temp/"+trc)
except:
pass
app.dataStore["lastUpdate"] = pyos.datetime.strftime(pyos.datetime.now(), "%a %b %d %H:%M:%S %Y")
app.dataStore["featured"] = self.featured
self.setPrgInfo("Done.")
pman.refresh()
def bgUpdate(self):
state.getThreadController().addThread(pyos.ParallelTask(self.update))
class Installer(object):
def __init__(self, appname, local=False):
self.local = local
self.name = appname
def start(self):
if self.local:
try:
zf = pyos.ZipFile(self.name, "r")
zf.extract("app.json", "temp/")
self.path = self.name
jd = readJSON("temp/app.json")
self.name = jd["name"]
if jd.get("pman", {}).get("min_os", 0.0) > pman.sysInf.get("version"):
pyos.GUI.ErrorDialog("The package requires a newer version of Python OS.").display()
return
except:
pyos.GUI.ErrorDialog("The file "+self.name+" is not a valid Python OS ZIP File.").display()
return
pyos.GUI.YNDialog("Install",
"Are you sure you want to install the package "+self.name+"? This will install the app and any unmet dependencies.",
self.confirm).display()
self.dialog = ProgressDialog()
@staticmethod
def localInstallSelect(path):
Installer(path, True).start()
@staticmethod
def localAsk():
state.getApplicationList().getApp("files").getModule().FilePicker((10, 10), app, width=app.ui.width-20, height=app.ui.height-20,
onSelect=Installer.localInstallSelect).display()
@staticmethod
def getDependencies(appname):
deps = cache.get(appname).get("pman", {}).get("depends", [])
print appname + " depends on " + str(deps)
for d in deps:
if d == appname:
print "Warning: The app "+appname+" depends on itself."
deps.remove(d)
continue
sd = cache.get(d).get("pman", {}).get("depends", [])
for s in sd:
if s not in deps and s != appname:
deps.append(s)
return deps
def confirm(self, resp):
if resp == "Yes":
self.dialog.display()
self.dialog.update("Requested install of "+self.name)
state.getThreadController().addThread(pyos.ParallelTask(self.install))
def install(self):
deps = Installer.getDependencies(self.name)
toinst = [self.name] + deps
post_install = []
for tia in toinst:
if not self.local and tia in state.getApplicationList().getApplicationNames() and cache.get(tia).get("version") <= state.getApplicationList().getApp(tia).version:
print tia + " is already installed at the newest version."
toinst.remove(tia)
continue
if cache.get(tia).get("pman", {}).get("min_os", 0.0) > pman.sysInf.get("version"):
self.dialog.update("!!! The install cannot continue because the package "+tia+" requires a newer version of Python OS.")
return
pim = cache.get(tia).get("pman", {}).get("onInstalled", None)
if pim != None:
post_install.append([tia, pim])
if toinst == []:
print self.name+" and all its dependencies are already installed."
return
self.dialog.update("The following packages will be installed:")
for p in toinst:
self.dialog.update(" - "+p)
c = 0
for package in toinst:
self.dialog.update(str(c)+": Working on "+package)
if package == self.name and self.local:
self.dialog.update("... Installing from local package.")
try:
self.dialog.update("... Installed.")
pyos.Application.install(self.path)
except:
self.dialog.update("!!! Install failed. Aborted.")
break
try:
pyos.os.remove("temp/pman_app.json")
except:
pass
c += 1
continue
if download(cache.get(package)["remotePath"]+package+".zip", "temp/pman_package.zip"):
self.dialog.update("... Downloaded.")
try:
pyos.Application.install("temp/pman_package.zip")
self.dialog.update("... Installed.")
except:
self.dialog.update("!!! Install failed. Aborted.")
break
else:
self.dialog.update("!!! Download failed. Install aborted.")
break
c += 1
self.dialog.update("Running post-install operations...")
for pia in post_install:
try:
getattr(state.getApplicationList().getApp(pia[0]).getModule(), pia[1])()
self.dialog.update("... "+pia[0])
except:
self.dialog.update("!!! Error running. "+pia[0]+" might be broken.")
self.dialog.update("Finished. Cleaning up...")
try:
pyos.os.remove("temp/pman_package.zip")
except:
pass
state.getApplicationList().reloadList()
self.dialog.update("Done.")
self.dialog.hide(True)
pman.refresh()
class PackageManager(object):
def __init__(self):
self.screens = []
self.sysInf = readJSON("res/system.json")
loadRepos()
self.checkDBFresh()
def refresh(self):
self.screens[len(self.screens)-1].refresh()
def checkDBFresh(self):
lupd = app.dataStore.get("lastUpdate", None)
try:
if len(cache.getStore().keys()) <= 1:
print "Empty Cache"
app.dataStore.set("featured", [])
raise AttributeError
except:
lupd = None
if lupd != None: diffdel = (pyos.datetime.now() - pyos.datetime.strptime(lupd, "%a %b %d %H:%M:%S %Y"))
if lupd == None or diffdel > timedelta(days=1):
cache.bgUpdate()
def openScreen(self, s):
self.screens.append(s)
s.activate()
def closeLast(self):
self.screens.pop().deactivate()
self.screens[len(self.screens)-1].activate()
| {
"content_hash": "595239a4fd8d37e060a15ec76427b8bd",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 174,
"avg_line_length": 45.17953020134228,
"alnum_prop": 0.5649348237828202,
"repo_name": "furmada/PythonOS",
"id": "8c15cc6a06aa58d6c6f32b08a99088704bd06c07",
"size": "26927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/pman/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "281422"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cdadmap', '0037_auto_20151106_1149'),
]
operations = [
migrations.AddField(
model_name='surveypanel',
name='instagram',
field=models.URLField(max_length=2000, null=True, blank=True),
),
migrations.AddField(
model_name='surveypanel',
name='nextdoor',
field=models.URLField(max_length=2000, null=True, blank=True),
),
migrations.AddField(
model_name='surveypanel',
name='youtube',
field=models.URLField(max_length=2000, null=True, blank=True),
),
]
| {
"content_hash": "07ab2039798d4ec018e3eef8c30969e9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 27.535714285714285,
"alnum_prop": 0.5758754863813229,
"repo_name": "NiJeLorg/CDADMap",
"id": "d35f8b6cefcfe92dada730041735f84798dcffcf",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdad/cdadmap/migrations/0038_auto_20151106_1332.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31722"
},
{
"name": "HTML",
"bytes": "114105"
},
{
"name": "JavaScript",
"bytes": "134735"
},
{
"name": "Python",
"bytes": "165621"
},
{
"name": "Shell",
"bytes": "1691"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from sklearn.model_selection import KFold
import numpy as np
def loo(dataset):
"""
Leave one out experiment
:param dataset: Dataset object
:return:
"""
for index, item in enumerate(dataset):
yield [item], dataset[:index] + dataset[index + 1:]
def test_train(dataset, test_size=1):
"""
Split into training and test set
:param dataset: Dataset object
:param test_size: Test set size
:return:
"""
data = dataset[:]
dataset_size = len(data)
if isinstance(test_size, float):
test_size = int(round(data*test_size))
for i in range(dataset_size-test_size):
yield dataset[i:i+test_size], data[:i] + data[i+1:]
def kfold(dataset, n_folds, shuffle=False, random_state=1):
"""
KFold cross validation technique
:param dataset:
:param n_folds:
:param shuffle:
:param random_state:
:return:
"""
dataset_np = np.array(dataset)
kf = KFold(n_splits=n_folds, shuffle=shuffle, random_state=random_state)
for train_index, test_index in kf.split(dataset_np):
yield dataset_np[test_index].tolist(), dataset_np[train_index].tolist()
| {
"content_hash": "84f2e6837ee29dd598cbb8cf8c91ca0a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 25.5,
"alnum_prop": 0.6887254901960784,
"repo_name": "dr-bigfatnoob/effort",
"id": "c7ab8d05c4771c1c1cf1938ded03c489361c8eb1",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126825"
},
{
"name": "Shell",
"bytes": "163"
}
],
"symlink_target": ""
} |
from collections import deque
from pylimitbook.tick import Bid, Ask, Trade
from pylimitbook.tree import Tree
from builtins import input
from six.moves import cStringIO as StringIO
def parse_csv(columns, line):
"""
Parse a CSV line that has ',' as a separator.
Columns is a list of the column names, must match the number of
comma-separated values in the input line.
"""
data = {}
split = line.split(',')
for idx, name in enumerate(columns):
data[name] = split[idx]
return data
class Book(object):
def __init__(self):
self.trades = deque(maxlen=100) # Index [0] is most recent trade
self.bids = Tree()
self.asks = Tree()
self.last_tick = None
self.last_timestamp = 0
def process_bid_ask(self, tick):
"""
Generic method to process bid or ask.
"""
tree = self.asks
if tick.is_bid:
tree = self.bids
if tick.qty == 0:
# Quantity is zero -> remove the entry
tree.remove_order_by_id(tick.id_num)
else:
if tree.order_exists(tick.id_num):
tree.update_order(tick)
else:
# New order
tree.insert_tick(tick)
def bid(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def bid_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def ask(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def ask_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def trade(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
data['id_num'] = 0
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def trade_split(self, symbol, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': 0
}
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def __str__(self):
# Efficient string concat
file_str = StringIO()
file_str.write("------ Bids -------\n")
if self.bids != None and len(self.bids) > 0:
for k, v in self.bids.price_tree.items(reverse=True):
file_str.write('%s' % v)
file_str.write("\n------ Asks -------\n")
if self.asks != None and len(self.asks) > 0:
for k, v in self.asks.price_tree.items():
file_str.write('%s' % v)
file_str.write("\n------ Trades ------\n")
if self.trades != None and len(self.trades) > 0:
num = 0
for entry in self.trades:
if num < 5:
file_str.write(str(entry.qty) + " @ " \
+ str(entry.price / 10000) \
+ " (" + str(entry.timestamp) + ")\n")
num += 1
else:
break
file_str.write("\n")
return file_str.getvalue()
| {
"content_hash": "baff388d4e95f488df364bf2069f82d9",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 88,
"avg_line_length": 32.843971631205676,
"alnum_prop": 0.5173828546750162,
"repo_name": "danielktaylor/PyLimitBook",
"id": "cbabad47180d13ff927635dfa92ac48c27e8cf2b",
"size": "4650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylimitbook/book.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38255"
}
],
"symlink_target": ""
} |
import re
import fnmatch
import IECore
import Gaffer
import GafferUI
## \todo Potential optimisation.
# Currently every PlugValueWidget instance connects to the various plug signals
# on the node, and tests to see if the plug is of interest when signalled. When many
# PlugValueWidgets are instantiated for a node this might prove a little slow. In this
# eventuality perhaps we can optimise things by having CompoundPlugValueWidget be
# responsible for updating only the correct child, so the children don't need to be
# connected themselves. PlugValueWidget would need to maintain the ability to do things
# itself when used alone, but this might give a good speedup for the most common case.
class PlugValueWidget( GafferUI.Widget ) :
def __init__( self, topLevelWidget, plug, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
# we don't want to call _updateFromPlug yet because the derived
# classes haven't constructed yet. they can call it themselves
# upon completing construction.
self.__setPlugInternal( plug, callUpdateFromPlug=False )
self.__popupMenuConnections = []
self.__readOnly = False
self.__dragEnterConnection = self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dragLeaveConnection = self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ) )
self.__dropConnection = self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )
## Note that it is acceptable to pass None to setPlug() (and to the constructor)
# and that derived classes should be implemented to cope with this eventuality.
def setPlug( self, plug ) :
self.__setPlugInternal( plug, callUpdateFromPlug=True )
def getPlug( self ) :
return self.__plug
## By default, PlugValueWidgets operate in the main context held by the script node
# for the script the plug belongs to. This function allows an alternative context
# to be provided, making it possible to view a plug at a custom frame (or with any
# other context modification).
def setContext( self, context ) :
assert( isinstance( context, Gaffer.Context ) )
if context is self.__context :
return
self.__context = context
self.__updateContextConnection()
self._updateFromPlug()
def getContext( self ) :
return self.__context
## This method allows editing of the plug value
# to be disabled for this ui. Note that even when getReadOnly()
# is False, the ui may not allow editing due to the plug
# itself being read only for other reasons.
def setReadOnly( self, readOnly ) :
assert( isinstance( readOnly, bool ) )
if readOnly == self.__readOnly :
return
self.__readOnly = readOnly
self._updateFromPlug()
def getReadOnly( self ) :
return self.__readOnly
## Should be reimplemented to return True if this widget includes
# some sort of labelling for the plug. This is used to prevent
# extra labels being created in the NodeUI when they're not necessary.
def hasLabel( self ) :
return False
## Implemented to return a tooltip containing the plug name and description.
def getToolTip( self ) :
result = GafferUI.Widget.getToolTip( self )
if result :
return result
plug = self.getPlug()
if plug is None :
return ""
input = plug.getInput()
inputText = ""
if input is not None :
inputText = " <- " + input.relativeName( input.commonAncestor( plug, Gaffer.GraphComponent.staticTypeId() ) )
result = "<h3>" + plug.relativeName( plug.node() ) + inputText + "</h3>"
description = Gaffer.Metadata.plugDescription( plug )
if description :
result += "\n\n" + description
return result
## Must be implemented by subclasses so that the widget reflects the current
# status of the plug. To temporarily suspend calls to this function, use
# Gaffer.BlockedConnection( self._plugConnections() ).
def _updateFromPlug( self ) :
raise NotImplementedError
def _plugConnections( self ) :
return [
self.__plugDirtiedConnection,
self.__plugInputChangedConnection,
self.__plugFlagsChangedConnection
]
## Returns True if the plug value is editable as far as this ui is concerned
# - that plug.settable() is True and self.getReadOnly() is False.
def _editable( self ) :
plug = self.getPlug()
if plug is None :
return False
if hasattr(plug, 'settable') and not plug.settable():
return False
if self.__readOnly :
return False
return True
## Adds a useful popup menu to the specified widget, providing useful functions that
# operate on the plug. The menu is populated with the result of _popupMenuDefinition(),
# and may also be customised by external code using the popupMenuSignal().
def _addPopupMenu( self, widget = None, buttons = GafferUI.ButtonEvent.Buttons.Right ) :
if widget is None :
widget = self
# it's unclear under what circumstances we get given a right-click vs a context menu event,
# but we try to cover all our bases by connecting to both.
self.__popupMenuConnections.append(
widget.buttonPressSignal().connect( IECore.curry( Gaffer.WeakMethod( self.__buttonPress ), buttonMask = buttons ) )
)
if buttons & GafferUI.ButtonEvent.Buttons.Right :
self.__popupMenuConnections.append(
widget.contextMenuSignal().connect( IECore.curry( Gaffer.WeakMethod( self.__contextMenu ) ) )
)
## Returns a definition for the popup menu - this is called each time the menu is displayed
# to allow for dynamic menus. Subclasses may override this method to customise the menu, but
# should call the base class implementation first.
def _popupMenuDefinition( self ) :
menuDefinition = IECore.MenuDefinition()
if self.getPlug().getInput() is not None :
menuDefinition.append( "/Edit input...", { "command" : Gaffer.WeakMethod( self.__editInput ) } )
menuDefinition.append( "/EditInputDivider", { "divider" : True } )
menuDefinition.append(
"/Remove input", {
"command" : Gaffer.WeakMethod( self.__removeInput ),
"active" : self.getPlug().acceptsInput( None ) and not self.getReadOnly(),
}
)
if hasattr( self.getPlug(), "defaultValue" ) and self.getPlug().direction() == Gaffer.Plug.Direction.In :
menuDefinition.append(
"/Default", {
"command" : IECore.curry( Gaffer.WeakMethod( self.__setValue ), self.getPlug().defaultValue() ),
"active" : self._editable()
}
)
self.popupMenuSignal()( menuDefinition, self )
return menuDefinition
__popupMenuSignal = Gaffer.Signal2()
## This signal is emitted whenever a popup menu for a plug is about
# to be shown. This provides an opportunity to customise the menu from
# external code. The signature for slots is ( menuDefinition, plugValueWidget ),
# and slots should just modify the menu definition in place.
@classmethod
def popupMenuSignal( cls ) :
return cls.__popupMenuSignal
## Returns a PlugValueWidget suitable for representing the specified plug. If
# useTypeOnly is True, then custom registrations made by registerCreator() will
# be ignored and only the plug type will be taken into account in creating a
# PlugValueWidget.
@classmethod
def create( cls, plug, useTypeOnly=False ) :
# first try to create one using a creator registered for the specific plug
if not useTypeOnly :
node = plug.node()
if node is not None :
plugPath = plug.relativeName( node )
nodeHierarchy = IECore.RunTimeTyped.baseTypeIds( node.typeId() )
for nodeTypeId in [ node.typeId() ] + nodeHierarchy :
creators = cls.__nodeTypesToCreators.get( nodeTypeId, None )
if creators :
for creator in creators :
if creator.plugPathMatcher.match( plugPath ) :
if creator.creator is not None :
return creator.creator( plug, **(creator.creatorKeywordArgs) )
else :
return None
# if that failed, then just create something based on the type of the plug
typeId = plug.typeId()
for plugTypeId in [ plug.typeId() ] + IECore.RunTimeTyped.baseTypeIds( plug.typeId() ) :
if plugTypeId in cls.__plugTypesToCreators :
creator = cls.__plugTypesToCreators[plugTypeId]
if creator is not None :
return creator( plug )
else :
return None
return None
## Registers a PlugValueWidget type for a specific Plug type. Note
# that the registerCreator function below provides the
# opportunity to further customise the type of Widget used for specific
# plug instances based on the node type and plug name.
@classmethod
def registerType( cls, plugTypeId, creator ) :
cls.__plugTypesToCreators[plugTypeId] = creator
## Registers a function to create a PlugWidget. None may be passed as creator, to
# disable the creation of uis for specific plugs.
@classmethod
def registerCreator( cls, nodeTypeId, plugPath, creator, **creatorKeywordArgs ) :
if isinstance( plugPath, basestring ) :
plugPath = re.compile( fnmatch.translate( plugPath ) )
else :
assert( type( plugPath ) is type( re.compile( "" ) ) )
creators = cls.__nodeTypesToCreators.setdefault( nodeTypeId, [] )
creator = IECore.Struct(
plugPathMatcher = plugPath,
creator = creator,
creatorKeywordArgs = creatorKeywordArgs,
)
creators.insert( 0, creator )
__plugTypesToCreators = {}
__nodeTypesToCreators = {}
def __plugDirtied( self, plug ) :
if plug.isSame( self.__plug ) :
self._updateFromPlug()
def __plugInputChanged( self, plug ) :
if plug.isSame( self.__plug ) :
self.__updateContextConnection()
self._updateFromPlug()
def __plugFlagsChanged( self, plug ) :
if plug.isSame( self.__plug ) :
self._updateFromPlug()
def __contextChanged( self, context, key ) :
self._updateFromPlug()
def __setPlugInternal( self, plug, callUpdateFromPlug ) :
self.__plug = plug
context = self.__fallbackContext
if self.__plug is not None :
self.__plugDirtiedConnection = plug.node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ) )
self.__plugInputChangedConnection = plug.node().plugInputChangedSignal().connect( Gaffer.WeakMethod( self.__plugInputChanged ) )
self.__plugFlagsChangedConnection = plug.node().plugFlagsChangedSignal().connect( Gaffer.WeakMethod( self.__plugFlagsChanged ) )
scriptNode = self.__plug.ancestor( Gaffer.ScriptNode.staticTypeId() )
if scriptNode is not None :
context = scriptNode.context()
else :
self.__plugDirtiedConnection = None
self.__plugInputChangedConnection = None
self.__plugFlagsChangedConnection = None
self.__context = context
self.__updateContextConnection()
if callUpdateFromPlug :
self._updateFromPlug()
def __updateContextConnection( self ) :
# we only want to be notified of context changes if we have a plug and that
# plug has an incoming connection. otherwise context changes are irrelevant
# and we'd just be slowing things down by asking for notifications.
context = self.__context
plug = self.getPlug()
if plug is None or plug.getInput() is None :
context = None
if context is not None :
self.__contextChangedConnection = context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
else :
self.__contextChangedConnection = None
# we use this when the plug being viewed doesn't have a ScriptNode ancestor
# to provide a context.
__fallbackContext = Gaffer.Context()
def __buttonPress( self, widget, event, buttonMask ) :
if event.buttons & buttonMask :
return self.__contextMenu()
return False
def __contextMenu( self, *unused ) :
if self.getPlug() is None :
return False
menuDefinition = self._popupMenuDefinition()
if not len( menuDefinition.items() ) :
return False
title = self.getPlug().relativeName( self.getPlug().node() )
title = ".".join( [ IECore.CamelCase.join( IECore.CamelCase.split( x ) ) for x in title.split( "." ) ] )
self.__popupMenu = GafferUI.Menu( menuDefinition, title = title )
self.__popupMenu.popup()
return True
def __setValue( self, value ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
self.getPlug().setValue( value )
def __editInput( self ) :
nodeEditor = GafferUI.NodeEditor.acquire( self.getPlug().getInput().node() )
if nodeEditor is None :
return
plugValueWidget = nodeEditor.nodeUI().plugValueWidget( self.getPlug().getInput() )
if plugValueWidget is None :
return
plugValueWidget.reveal()
def __removeInput( self ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
self.getPlug().setInput( None )
# drag and drop stuff
def __dragEnter( self, widget, event ) :
if self.getReadOnly() :
return False
if isinstance( event.sourceWidget, GafferUI.PlugValueWidget ) :
sourcePlugValueWidget = event.sourceWidget
else :
sourcePlugValueWidget = event.sourceWidget.ancestor( GafferUI.PlugValueWidget )
if sourcePlugValueWidget is not None and sourcePlugValueWidget.getPlug().isSame( self.getPlug() ) :
return False
if isinstance( event.data, Gaffer.Plug ) :
if self.getPlug().acceptsInput( event.data ) :
self.setHighlighted( True )
return True
elif hasattr( self.getPlug(), "setValue" ) and self._dropValue( event ) is not None :
if self.getPlug().settable() :
self.setHighlighted( True )
return True
return False
def __dragLeave( self, widget, event ) :
self.setHighlighted( False )
def __drop( self, widget, event ) :
self.setHighlighted( False )
with Gaffer.UndoContext( self.getPlug().node().scriptNode() ) :
if isinstance( event.data, Gaffer.Plug ) :
self.getPlug().setInput( event.data )
else :
self.getPlug().setValue( self._dropValue( event ) )
return True
## Called from a dragEnter slot to see if the drag data can
# be converted to a value suitable for a plug.setValue() call.
# If this returns a non-None value then the drag will be accepted
# and plug.setValue() will be called in the drop event. May be
# reimplemented by derived classes to provide conversions of the
# drag data to the type needed for setValue().
def _dropValue( self, dragDropEvent ) :
if not hasattr( self.getPlug(), "defaultValue" ) :
return None
plugValueType = type( self.getPlug().defaultValue() )
if isinstance( dragDropEvent.data, plugValueType ) :
return dragDropEvent.data
elif isinstance( dragDropEvent.data, IECore.Data ) :
dataValue = None
if hasattr( dragDropEvent.data, "value" ) :
dataValue = dragDropEvent.data.value
else :
with IECore.IgnoredExceptions( Exception ) :
if len( dragDropEvent.data ) == 1 :
dataValue = dragDropEvent.data[0]
if dataValue is None :
return None
elif isinstance( dataValue, plugValueType ) :
return dataValue
else :
with IECore.IgnoredExceptions( Exception ) :
return plugValueType( dataValue )
return None
| {
"content_hash": "6019fc983cf18b8abbfbd8d70916b29c",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 131,
"avg_line_length": 33.015452538631344,
"alnum_prop": 0.7067397699919765,
"repo_name": "davidsminor/gaffer",
"id": "843f1176f098802f74510bf43a6eda135d8452a7",
"size": "16840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/PlugValueWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
"""
A example script to automatically send messages based on certain triggers.
The script makes uses of environment variables to determine the API ID,
hash, phone and such to be used. You may want to add these to your .bashrc
file, including TG_API_ID, TG_API_HASH, TG_PHONE and optionally TG_SESSION.
This script assumes that you have certain files on the working directory,
such as "xfiles.m4a" or "anytime.png" for some of the automated replies.
"""
from getpass import getpass
from collections import defaultdict
from datetime import datetime, timedelta
from os import environ
import re
from telethon import TelegramClient
from telethon.errors import SessionPasswordNeededError
from telethon.tl.types import UpdateNewChannelMessage, UpdateShortMessage, MessageService
from telethon.tl.functions.messages import EditMessageRequest
"""Uncomment this for debugging
import logging
logging.basicConfig(level=logging.DEBUG)
logging.debug('dbg')
logging.info('info')
"""
REACTS = {'emacs': 'Needs more vim',
'chrome': 'Needs more Firefox'}
# A list of dates of reactions we've sent, so we can keep track of floods
recent_reacts = defaultdict(list)
def update_handler(update):
global recent_reacts
try:
msg = update.message
except AttributeError:
# print(update, 'did not have update.message')
return
if isinstance(msg, MessageService):
print(msg, 'was service msg')
return
# React to messages in supergroups and PMs
if isinstance(update, UpdateNewChannelMessage):
words = re.split('\W+', msg.message)
for trigger, response in REACTS.items():
if len(recent_reacts[msg.to_id.channel_id]) > 3:
# Silently ignore triggers if we've recently sent 3 reactions
break
if trigger in words:
# Remove recent replies older than 10 minutes
recent_reacts[msg.to_id.channel_id] = [
a for a in recent_reacts[msg.to_id.channel_id] if
datetime.now() - a < timedelta(minutes=10)
]
# Send a reaction
client.send_message(msg.to_id, response, reply_to=msg.id)
# Add this reaction to the list of recent actions
recent_reacts[msg.to_id.channel_id].append(datetime.now())
if isinstance(update, UpdateShortMessage):
words = re.split('\W+', msg)
for trigger, response in REACTS.items():
if len(recent_reacts[update.user_id]) > 3:
# Silently ignore triggers if we've recently sent 3 reactions
break
if trigger in words:
# Send a reaction
client.send_message(update.user_id, response, reply_to=update.id)
# Add this reaction to the list of recent reactions
recent_reacts[update.user_id].append(datetime.now())
# Automatically send relevant media when we say certain things
# When invoking requests, get_input_entity needs to be called manually
if isinstance(update, UpdateNewChannelMessage) and msg.out:
if msg.message.lower() == 'x files theme':
client.send_voice_note(msg.to_id, 'xfiles.m4a', reply_to=msg.id)
if msg.message.lower() == 'anytime':
client.send_file(msg.to_id, 'anytime.png', reply_to=msg.id)
if '.shrug' in msg.message:
client(EditMessageRequest(
client.get_input_entity(msg.to_id), msg.id,
message=msg.message.replace('.shrug', r'¯\_(ツ)_/¯')
))
if isinstance(update, UpdateShortMessage) and update.out:
if msg.lower() == 'x files theme':
client.send_voice_note(update.user_id, 'xfiles.m4a', reply_to=update.id)
if msg.lower() == 'anytime':
client.send_file(update.user_id, 'anytime.png', reply_to=update.id)
if '.shrug' in msg:
client(EditMessageRequest(
client.get_input_entity(update.user_id), update.id,
message=msg.replace('.shrug', r'¯\_(ツ)_/¯')
))
if __name__ == '__main__':
session_name = environ.get('TG_SESSION', 'session')
user_phone = environ['TG_PHONE']
client = TelegramClient(
session_name, int(environ['TG_API_ID']), environ['TG_API_HASH'],
proxy=None, update_workers=4
)
try:
print('INFO: Connecting to Telegram Servers...', end='', flush=True)
client.connect()
print('Done!')
if not client.is_user_authorized():
print('INFO: Unauthorized user')
client.send_code_request(user_phone)
code_ok = False
while not code_ok:
code = input('Enter the auth code: ')
try:
code_ok = client.sign_in(user_phone, code)
except SessionPasswordNeededError:
password = getpass('Two step verification enabled. '
'Please enter your password: ')
code_ok = client.sign_in(password=password)
print('INFO: Client initialized successfully!')
client.add_update_handler(update_handler)
input('Press Enter to stop this!\n')
except KeyboardInterrupt:
pass
finally:
client.disconnect()
| {
"content_hash": "0045e246a9b6fd07ae6ca6f3479fe802",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 89,
"avg_line_length": 39.330882352941174,
"alnum_prop": 0.6176855487006917,
"repo_name": "andr-04/Telethon",
"id": "660263639e11a34d92cfce9a3e30194d64a4d077",
"size": "5380",
"binary": false,
"copies": "1",
"ref": "refs/heads/asyncio",
"path": "telethon_examples/replier.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "276159"
}
],
"symlink_target": ""
} |
from twext.python.filepath import CachingFilePath as FilePath
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.ical import Component
from twistedcaldav.memcachelock import MemcacheLock
from twistedcaldav.memcacher import Memcacher
from twistedcaldav.test.util import StoreTestCase, SimpleStoreRequest
from txdav.caldav.datastore.sql import CalendarObject
from txweb2 import responsecode
from txweb2.dav.util import joinURL
from txweb2.http_headers import MimeType
from txweb2.iweb import IResponse
from txweb2.stream import MemoryStream, FileStream
class CollectionContents(StoreTestCase):
"""
PUT request
"""
dataPath = FilePath(__file__).sibling("data")
def setUp(self):
# Need to fake out memcache
def _getFakeMemcacheProtocol(self):
result = super(MemcacheLock, self)._getMemcacheProtocol()
if isinstance(result, Memcacher.nullCacher):
result = self._memcacheProtocol = Memcacher.memoryCacher()
return result
self.patch(MemcacheLock, "_getMemcacheProtocol",
_getFakeMemcacheProtocol)
# Need to not do implicit behavior during these tests
def _fakeDoImplicitScheduling(self, component, inserting, internal_state, options, updateSelf=False):
return False, None, False, None
self.patch(CalendarObject, "doImplicitScheduling",
_fakeDoImplicitScheduling)
# Tests in this suite assume that the root resource is a calendar home.
# FIXME: there should be a centralized way of saying 'make this look
# like a calendar home'
return super(CollectionContents, self).setUp()
@inlineCallbacks
def test_collection_in_calendar(self):
"""
Make (regular) collection in calendar
"""
calendar_uri = "/calendars/users/wsanchez/collection_in_calendar/"
principal = yield self.actualRoot.findPrincipalForAuthID("wsanchez")
request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authPrincipal=principal)
response = yield self.send(request)
response = IResponse(response)
if response.code != responsecode.CREATED:
self.fail("MKCALENDAR failed: %s" % (response.code,))
nested_uri = joinURL(calendar_uri, "nested")
request = SimpleStoreRequest(self, "MKCOL", nested_uri, authPrincipal=principal)
response = yield self.send(request)
response = IResponse(response)
if response.code != responsecode.FORBIDDEN:
self.fail("Incorrect response to nested MKCOL: %s" % (response.code,))
def test_bogus_file(self):
"""
Bogus file in calendar collection
"""
# FIXME: Should FileStream be OK here?
# FIXME: Should FileStream be OK here?
dst_file = file(__file__)
self.addCleanup(dst_file.close)
stream = FileStream(dst_file)
return self._test_file_in_calendar("bogus file in calendar", (stream, responsecode.FORBIDDEN))
def openHolidays(self):
"""
Open the 'Holidays.ics' calendar.
@return: an open file pointing at the start of Holidays.ics
@rtype: C{file}
"""
f = self.dataPath.child("Holidays.ics").open()
self.addCleanup(f.close)
return f
def test_monolithic_ical(self):
"""
Monolithic iCalendar file in calendar collection
"""
# FIXME: Should FileStream be OK here?
dst_file = self.openHolidays()
stream = FileStream(dst_file)
return self._test_file_in_calendar("monolithic iCalendar file in calendar", (stream, responsecode.FORBIDDEN))
def test_single_events(self):
"""
Single events in calendar collection
"""
work = []
stream = self.openHolidays()
calendar = Component.fromStream(stream)
for subcomponent in calendar.subcomponents():
if subcomponent.name() == "VEVENT":
subcalendar = Component("VCALENDAR")
subcalendar.addComponent(subcomponent)
for property in calendar.properties():
subcalendar.addProperty(property)
work.append((MemoryStream(str(subcalendar)), responsecode.CREATED))
return self._test_file_in_calendar("single event in calendar", *work)
def test_duplicate_uids(self):
"""
Mutiple resources with the same UID.
"""
stream = self.dataPath.child(
"Holidays").child(
"C318AA54-1ED0-11D9-A5E0-000A958A3252.ics").open()
try:
calendar = str(Component.fromStream(stream))
finally:
stream.close()
return self._test_file_in_calendar(
"mutiple resources with the same UID",
(MemoryStream(calendar), responsecode.CREATED),
(MemoryStream(calendar), responsecode.FORBIDDEN),
)
@inlineCallbacks
def _test_file_in_calendar(self, what, *work):
"""
Creates a calendar collection, then PUTs a resource into that collection
with the data from given stream and verifies that the response code from the
PUT request matches the given response_code.
"""
calendar_uri = "/calendars/users/wsanchez/testing_calendar/"
principal = yield self.actualRoot.findPrincipalForAuthID("wsanchez")
request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authPrincipal=principal)
response = yield self.send(request)
response = IResponse(response)
if response.code != responsecode.CREATED:
self.fail("MKCALENDAR failed: %s" % (response.code,))
c = 0
for stream, response_code in work:
dst_uri = joinURL(calendar_uri, "dst%d.ics" % (c,))
request = SimpleStoreRequest(self, "PUT", dst_uri, authPrincipal=principal)
request.headers.setHeader("if-none-match", "*")
request.headers.setHeader("content-type", MimeType("text", "calendar"))
request.stream = stream
response = yield self.send(request)
response = IResponse(response)
if response.code != response_code:
self.fail("Incorrect response to %s: %s (!= %s)" % (what, response.code, response_code))
c += 1
@inlineCallbacks
def test_fail_dot_file_put_in_calendar(self):
"""
Make (regular) collection in calendar
"""
calendar_uri = "/calendars/users/wsanchez/dot_file_in_calendar/"
principal = yield self.actualRoot.findPrincipalForAuthID("wsanchez")
request = SimpleStoreRequest(self, "MKCALENDAR", calendar_uri, authPrincipal=principal)
response = yield self.send(request)
response = IResponse(response)
if response.code != responsecode.CREATED:
self.fail("MKCALENDAR failed: %s" % (response.code,))
stream = self.dataPath.child(
"Holidays").child(
"C318AA54-1ED0-11D9-A5E0-000A958A3252.ics"
).open()
try:
calendar = str(Component.fromStream(stream))
finally:
stream.close()
event_uri = "/".join([calendar_uri, ".event.ics"])
request = SimpleStoreRequest(self, "PUT", event_uri, authPrincipal=principal)
request.headers.setHeader("content-type", MimeType("text", "calendar"))
request.stream = MemoryStream(calendar)
response = yield self.send(request)
response = IResponse(response)
if response.code != responsecode.FORBIDDEN:
self.fail("Incorrect response to dot file PUT: %s" % (response.code,))
| {
"content_hash": "7974fa4ae9702c978d69929157cb8cce",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 117,
"avg_line_length": 37.46376811594203,
"alnum_prop": 0.6353320438426822,
"repo_name": "red-hood/calendarserver",
"id": "ab6b3b9c1b695ec413b915147db12dff1bd0883c",
"size": "8362",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "twistedcaldav/test/test_collectioncontents.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='key-exchange-plugin',
description='A Cloudify plugin to exchange public keys between dynamically instanciated nodes',
version='0.85',
author='Joshua Cornutt',
author_email='[email protected]',
packages=['plugin'],
install_requires=[
'cloudify-plugins-common>=3.2.1'
],
) | {
"content_hash": "a9ce9e31cdce88543539a0b91fcfb167",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 99,
"avg_line_length": 27.307692307692307,
"alnum_prop": 0.6816901408450704,
"repo_name": "01000101/cloudify-test-app",
"id": "119a39ed9e64de014d07bf7ea9881a784a62b83d",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Package1/plugins/exchange/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1019"
},
{
"name": "HTML",
"bytes": "3770"
},
{
"name": "JavaScript",
"bytes": "10525"
},
{
"name": "Python",
"bytes": "23852"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersOperations(object):
"""UsersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UserList"]
"""Gets all the users registered on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2019_07_01.models.UserList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UserList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.User"
"""Gets the properties of the specified user.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: User, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2019_07_01.models.User
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
user, # type: "_models.User"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.User"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.User"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(user, 'User')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
user, # type: "_models.User"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.User"]
"""Creates a new user or updates an existing user's information on a Data Box Edge/Data Box
Gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param user: The user details.
:type user: ~azure.mgmt.databoxedge.v2019_07_01.models.User
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either User or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2019_07_01.models.User]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
user=user,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
def begin_delete(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the user on a databox edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
| {
"content_hash": "c6d915197537503b44f64093a1921945",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 211,
"avg_line_length": 47.101851851851855,
"alnum_prop": 0.6292510320424611,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6eea2a22d3d6b2fb32061dc4236a533a294b07d6",
"size": "20815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_07_01/operations/_users_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Get a single product from a specified Google Merchant Center account and a product ID"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'merchant_id',
help='The ID of the merchant center.')
argparser.add_argument(
'product_id',
help='The ID of the product to get.')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'content', 'v2', __doc__, __file__, parents=[argparser])
#grab the command line paramaters for Merchant ID, and the product ID
merchant_id = flags.merchant_id
product_id = flags.product_id
try:
request = service.products().get(merchantId=merchant_id, productId="online:en:US:" + product_id)
result = request.execute()
#print some product attribute information
#print the product ID
print "Product ID: " + result['id']
#retrieve and print the price
price = result['price']
print "Product Price: " + price['value']
#print the title of the product
print "Product Title: " + result['title']
#print the stock avaiability of the product
print "Product Stock Availability:" + result['availability']
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "c012aa275fe208220acd4b3d9bc65b01",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 100,
"avg_line_length": 26.70175438596491,
"alnum_prop": 0.69053876478318,
"repo_name": "davidtzau/google-shopping-examples",
"id": "5e47f34bb6b3ce6e7a22ea784c6b1a7291ecd187",
"size": "2120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "product_get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5057"
}
],
"symlink_target": ""
} |
import flatbuffers
import multiprocessing
import queue
from threading import Thread
from rlbot.messages.flat import QuickChat
from rlbot.messages.flat import QuickChatSelection
from rlbot.utils.logging_utils import get_logger
from rlbot.utils.structures.utils import create_enum_object
"""
Look for quick chats from here:
https://github.com/RLBot/RLBot/blob/master/src/main/flatbuffers/rlbot.fbs
"""
QuickChats = create_enum_object([chat for chat in dir(QuickChatSelection.QuickChatSelection)
if not chat.startswith('__') and not
callable(getattr(QuickChatSelection.QuickChatSelection, chat))],
list_name='quick_chat_list',
other_attributes=[
('CHAT_NONE', -1),
('CHAT_EVERYONE', False),
('CHAT_TEAM_ONLY', True)
],
attribute_object=QuickChatSelection.QuickChatSelection)
def send_quick_chat_flat(game_interface, index, team, team_only, quick_chat):
builder = flatbuffers.Builder(0)
QuickChat.QuickChatStart(builder)
QuickChat.QuickChatAddQuickChatSelection(builder, quick_chat)
QuickChat.QuickChatAddPlayerIndex(builder, index)
QuickChat.QuickChatAddTeamOnly(builder, team_only)
result = QuickChat.QuickChatEnd(builder)
builder.Finish(result)
return game_interface.send_chat_flat(builder)
def send_quick_chat(queue_holder, index, team, team_only, quick_chat):
"""
Sends a quick chat to the general queue for everyone to pull from
:param queue_holder:
:param index: The index of the player sending the message
:param team: The team of the player sending the message
:param team_only: if the message is team only
:param quick_chat: The contents of the quick chat
:return:
"""
queue_holder["output"].put((index, team, team_only, quick_chat))
| {
"content_hash": "f7675be7a9b36b229bf5aeb0a8f9a47e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 97,
"avg_line_length": 37.76923076923077,
"alnum_prop": 0.664969450101833,
"repo_name": "drssoccer55/RLBot",
"id": "7ad1efbb52010f48b6a6d4ed76816740635443c7",
"size": "1964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/rlbot/utils/structures/quick_chats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "13520"
},
{
"name": "Python",
"bytes": "48042"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django_couchdb_utils.sessions.models import Session
from couchdbkit.exceptions import ResourceNotFound
class SessionStore(SessionBase):
def create(self):
while True:
self._session_key = self._get_new_session_key()
self.modified = True
self._session_cache = {}
try:
self.save(must_create=True)
except CreateError:
continue
return None
def load(self):
session = Session.get_session(self.session_key)
if not session:
self.create()
return {}
try:
return self.decode(session.session_data)
except SuspiciousOperation:
return {}
def save(self, must_create=False):
session = Session.get_session(self.session_key)
if must_create and session:
raise CreateError
if must_create:
session = Session()
session.session_key = self.session_key
session.session_data = self.encode(self._get_session(no_load=must_create))
session.expire_date = self.get_expiry_date()
else:
if not session:
return None
session.session_data = self.encode(self._get_session(no_load=must_create))
session.expire_date = self.get_expiry_date()
session.save()
def exists(self, session_key):
session = Session.get_session(session_key)
if session is None:
return False
return True
def delete(self, session_key=None):
if not session_key:
if not self._session_key:
return None
session_key = self._session_key
session = Session.get_session(session_key)
if not session:
return None
try:
session.delete()
except ResourceNotFound:
pass
| {
"content_hash": "c49a9efb9d9137332e38243997ffa9b8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 86,
"avg_line_length": 31.923076923076923,
"alnum_prop": 0.5898795180722891,
"repo_name": "stefankoegl/django-couchdb-utils",
"id": "53aa6583e5de903c489c97b6e1b64a35b3536047",
"size": "2075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_couchdb_utils/sessions/couchdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1322"
},
{
"name": "Python",
"bytes": "142363"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
} |
"""
The Human Hive
"""
import sys
import time
import argparse
import multiprocessing
import numpy as np
import pyaudio
from humanhive import samplestream, utils, sources
from humanhive import HumanHive
def build_parser():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"--n-channels",
required=True,
help="The number of channels to use for operation.",
type=int)
parser.add_argument(
"--swarm-sample",
required=True,
help="Audio file containing swarm sample")
parser.add_argument(
"--output-device-id",
help="The ID for the output sound card to use.",
default=None,
# type=int
)
parser.add_argument(
"--input-device-id",
help="The ID for the input sound card to use.",
default=None,
# type=int
)
parser.add_argument(
"--recorded-samples-dir",
required=False,
help=(
"Directory for saving recordings. "
"Will be created if it doesn't exist."))
return parser
if __name__ == "__main__":
args = build_parser().parse_args()
sample_rate = 48000#utils.get_sample_rate_for_device(args.output_device_id)
print("Initialising...")
humanhive = HumanHive(
n_channels=args.n_channels,
output_device_id=args.output_device_id,
input_device_id=args.input_device_id,
sample_rate=sample_rate,
master_volume=1.0)
audio_data = samplestream.load_wave_file(
args.swarm_sample, mono=True)
# Add a source
humanhive.source_bank.add_source(
sources.SwarmSource(
audio_data,
n_channels=args.n_channels,
sample_rate=sample_rate))
print("Entering HumanHive main loop")
humanhive.run()
print("Exiting")
| {
"content_hash": "a85f51a86bef0817972f4a7f267847d2",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 22.753086419753085,
"alnum_prop": 0.6071622354856213,
"repo_name": "mfergie/human-hive",
"id": "9eae55f4843324462d1fad411d93965415126887",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/humanhive_run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46112"
}
],
"symlink_target": ""
} |
import scanner
import numpy as np
import cv2
import pickle
import time
import progressbar
class Detector(object):
def __init__(self, descriptor, classifier):
self.descriptor = descriptor
self.classifier = classifier
def dump(self, filename):
obj = {"descriptor" : self.descriptor, "classifier" : self.classifier}
with open(filename, 'wb') as f:
pickle.dump(obj, f)
@classmethod
def load(cls, filename):
with open(filename, 'rb') as f:
obj = pickle.load(f)
loaded = cls(descriptor = obj["descriptor"], classifier = obj["classifier"])
return loaded
def _get_grayscale(self, image):
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
elif len(image.shape) == 2:
gray_image = image
else:
raise ValueError('Input image is invalid.')
return gray_image
def _show(self, bb, prob, threshold_prob):
if prob > threshold_prob:
delay=0.05
color=(255,0,0)
self.show_boxes([bb], "{:.2f}".format(prob), delay, color)
else:
delay = 0.005
color = (0,255,0)
self.show_boxes([bb], "{:.2f}".format(prob), delay, color)
def run(self, image, window_size, step, pyramid_scale=0.7, threshold_prob=0.5, do_nms=True, show_result=True, show_operation=False):
"""
Parameters
----------
image : array, shape (n_rows, n_cols, n_channels) or (n_rows, n_cols)
Input image to run the detector
Returns
----------
boxes : array, shape (n_detected, height, 4)
detected bounding boxes
probs : array, shape (n_detected, 1)
probability at the boxes
"""
self._display_image = image
gray_image = self._get_grayscale(image)
scanner_ = scanner.ImageScanner(gray_image)
boxes = []
probs = []
# Todo : 모든 patch 를 generate 한 다음 한번에 연산하는 것과 속도를 비교해보자.
for _ in scanner_.get_next_layer(pyramid_scale, window_size[0], window_size[1]):
for _, _, window in scanner_.get_next_patch(step[0], step[1], window_size[0], window_size[1]):
# Todo: Refactoring, direct access should be denied
features = self.descriptor.describe([window]).reshape(1, -1)
prob = self.classifier.predict_proba(features)[0][1]
if prob > threshold_prob:
boxes.append(scanner_.bounding_box)
probs.append(prob)
if show_operation:
self._show(scanner_.bounding_box, prob, threshold_prob)
if do_nms and boxes != []:
# Todo : overlapThresh를 0.5 로 바꾸고 테스트해보자.
boxes, probs = self._do_nms(boxes, probs, overlapThresh=0.3)
boxes = np.array(boxes, "int")
probs = np.array(probs)
if show_result:
self.show_boxes(boxes)
return boxes, probs
def show_boxes(self, boxes, msg=None, delay=None, color=(0,0,255)):
image = self._display_image.copy()
for y1, y2, x1, x2 in boxes:
cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
if msg is not None:
cv2.putText(image, msg, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, color, thickness=2)
cv2.imshow("Sliding Window Operation", image)
if delay is None:
cv2.waitKey(0)
else:
cv2.waitKey(1)
time.sleep(delay)
def hard_negative_mine(self, negative_image_files, window_size, step, pyramid_scale=0.7, threshold_prob=0.5):
# Todo : progress bar
features = []
probs = []
for patch, probability in self._generate_negative_patches(negative_image_files,
window_size, step, pyramid_scale,
threshold_prob):
feature = self.descriptor.describe([patch])[0]
features.append(feature)
probs.append(probability)
if len(probs) == 0:
pass
else:
# sort by probability
data = np.concatenate([np.array(probs).reshape(-1,1), np.array(features)], axis=1)
data = data[data[:, 0].argsort()[::-1]]
features = data[:, 1:]
probs = data[:, 0]
return features, probs
def _generate_negative_patches(self, negative_image_files, window_size, step, pyramid_scale, threshold_prob):
widgets = ["Generating negative samples which represent high probability: ",
progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(negative_image_files), widgets=widgets).start()
for i, image_file in enumerate(negative_image_files):
image = cv2.imread(image_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect objects in the image
(boxes, probs) = self.run(image,
window_size, step, pyramid_scale,
threshold_prob,
do_nms=False,
show_result=False,
show_operation=False)
pbar.update(i)
for (y1, y2, x1, x2), prob in zip(boxes, probs):
negative_patch = cv2.resize(image[y1:y2, x1:x2], (window_size[1], window_size[0]), interpolation=cv2.INTER_AREA)
yield negative_patch, prob
pbar.finish()
# todo: code review
def _do_nms(self, boxes, probs, overlapThresh=0.5):
"""
Reference: http://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
"""
if len(boxes) == 0:
return []
boxes = np.array(boxes, dtype="float")
probs = np.array(probs)
pick = []
y1 = boxes[:, 0]
y2 = boxes[:, 1]
x1 = boxes[:, 2]
x2 = boxes[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(probs)
# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add the index value to the list of
# picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of the bounding box and the
# smallest (x, y) coordinates for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have overlap greater than the
# provided overlap threshold
idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked
return boxes[pick].astype("int"), probs[pick]
if __name__ == "__main__":
pass
| {
"content_hash": "fdb52f43cec585ffb0cb40d3cd813674",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 136,
"avg_line_length": 36.46636771300449,
"alnum_prop": 0.5012297097884899,
"repo_name": "penny4860/object-detector",
"id": "09ce8c82dc93cfd5aa3066c87ce0a3789c15f469",
"size": "8224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "object_detector/detector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59819"
}
],
"symlink_target": ""
} |
from .helper import DataHelper, EmailDelivery
"""
Module for work with data
"""
__all__ = [
'DataHelper',
'EmailDelivery'
]
| {
"content_hash": "975c447d4bed3f9b9860334b5bf1655f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 13.4,
"alnum_prop": 0.6417910447761194,
"repo_name": "hose314/cs.betlabs.ru",
"id": "e4ea5ea35c40db728dc2e636c607f2c00b04ee54",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AAA/Data/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57732"
},
{
"name": "JavaScript",
"bytes": "3273"
},
{
"name": "Python",
"bytes": "8798"
}
],
"symlink_target": ""
} |
from body import Body
class Simulation(object):
MOUNTAIN_HEIGHT = 1E06 # 1000 km
def __init__(self, num_random_objs):
self.bodies = []
self.selected_body = None
if num_random_objs > 0:
self.selected_body = 0
self.bodies.append(Body.generate_circular_equatorial_orbit(6.0E5, (0.0, 1.0, 1.0, 1.0)))
self.bodies.append(Body.generate_circular_equatorial_orbit(1.2E6))
for x in xrange(num_random_objs - 2):
self.bodies.append(Body.generate_random_orbit())
self.pos_viz_mode = Body.POSITION_VISUALISATIONS['symbol']
self.orbit_viz_mode = Body.ORBIT_VISUALISATIONS['all']
self.set_defaults()
def set_defaults(self):
self.state = 'pre-run'
self.time = 0.0
self.time_step = 10.0
self.time_barrier = 1.0E15
self.draw_atmosphere = True
self.draw_mountain = False
self.planet_transparent = True
def current_body(self):
if self.selected_body is not None:
return self.bodies[self.selected_body]
else:
return None
def start(self):
if self.state in ('pre-run', 'paused'):
self.state = 'running'
def pause(self):
if self.state == 'running':
self.state = 'paused'
def step_time(self):
if self.state != 'finished':
if self.state == 'running' and self.time < self.time_barrier:
self.time += self.time_step
else:
self.state = 'finished'
def forward_time(self, t):
self.time = t
for body in self.bodies:
body.calc_state_vectors(t)
# iss.a = 415000 + 42000 + 2 * 6.371E6
# iss.T = 5575.12
# iss.e = 0.0003706
# iss.i = math.radians(51.647)
# iss.o = math.radians(284.7313)
# iss.w = math.radians(60.0528)
| {
"content_hash": "89e1dba4920dd57a8a812157bb89b356",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 100,
"avg_line_length": 29.876923076923077,
"alnum_prop": 0.5484037075180227,
"repo_name": "bojanbog/orbital-academy",
"id": "9d04f6ea7aa8fb287aef757c352c4aead5815816",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_version/simulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45"
},
{
"name": "JavaScript",
"bytes": "18046"
},
{
"name": "Python",
"bytes": "52483"
}
],
"symlink_target": ""
} |
from .upload import manual_upload
from .upload import get_hashes
from .geotag import Geotag
def upload(path, **kwargs):
manual_upload(path, **kwargs)
def geotag(path, path_gpx, **kwargs):
"""GeoTag
:param ``path``: Image File Path
:param ``gpx``: GPX File Path
:param ``bearing``: Bearing/Direction offset (default=0)
:param ``time``: Time offset in Seconds (default=0)
"""
return Geotag(path, path_gpx, **kwargs)
def hashes(email, password):
return get_hashes(email, password)
| {
"content_hash": "740820881fdd0d0dbef43fcaa473a903",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 23.681818181818183,
"alnum_prop": 0.6641074856046065,
"repo_name": "DenisCarriere/mapillary",
"id": "391a63ade0b253b51d086f9ecc915a34e67026a5",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapillary/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73550"
}
],
"symlink_target": ""
} |
import pymel.core as pm
import logging
import Renderer as Renderer
import traceback
import sys
import os
import optimizeTextures
import path
reload(Renderer)
log = logging.getLogger("mtkrLogger")
RENDERER_NAME = "Kray"
class KrayRenderer(Renderer.MayaToRenderer):
theRendererInstance = None
@staticmethod
def theRenderer(arg=None):
if not KrayRenderer.theRendererInstance:
KrayRenderer.theRendererInstance = KrayRenderer(RENDERER_NAME , __name__)
return KrayRenderer.theRendererInstance
def __init__(self, rendererName, moduleName):
Renderer.MayaToRenderer.__init__(self, rendererName, moduleName)
self.rendererTabUiDict = {}
def getEnumList(self, attr):
return [(i, v) for i, v in enumerate(attr.getEnums().keys())]
def updateTest(self, dummy=None):
print "UpdateTest", dummy
def addUserTabs(self):
pm.renderer(self.rendererName, edit=True, addGlobalsTab=self.renderTabMelProcedure("Environment"))
pm.renderer(self.rendererName, edit=True, addGlobalsTab=self.renderTabMelProcedure("Photons"))
pm.renderer(self.rendererName, edit=True, addGlobalsTab=self.renderTabMelProcedure("FinalGathering"))
pm.renderer(self.rendererName, edit=True, addGlobalsTab=self.renderTabMelProcedure("Quality"))
def KrayEnvironmentCreateTab(self):
log.debug("KrayEnvironmentCreateTab()")
self.createGlobalsNode()
envDict = {}
self.rendererTabUiDict['environment'] = envDict
bgDict = {}
self.rendererTabUiDict['background'] = bgDict
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "EnvScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Background", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
attr = pm.Attribute(self.renderGlobalsNodeName + ".backgroundType")
ui = pm.attrEnumOptionMenuGrp(label="Background Type", at=self.renderGlobalsNodeName + ".backgroundType", ei=self.getEnumList(attr))
bgDict['environmentColor'] = pm.attrColorSliderGrp(label="Background Color", at=self.renderGlobalsNodeName + ".environmentColor")
bgDict['gradientHorizon'] = pm.attrColorSliderGrp(label="Horizon Color", at=self.renderGlobalsNodeName + ".gradientHorizon")
bgDict['gradientZenit'] = pm.attrColorSliderGrp(label="Zenith Color", at=self.renderGlobalsNodeName + ".gradientZenit")
bgDict['nadir'] = pm.attrColorSliderGrp(label="Bottom Color", at=self.renderGlobalsNodeName + ".nadir")
bgDict['groundAlbedo'] = pm.attrColorSliderGrp(label="Ground Albedo", at=self.renderGlobalsNodeName + ".groundAlbedo")
pm.separator()
bgDict['environmentMap'] = pm.attrColorSliderGrp(label="Environment Map", at=self.renderGlobalsNodeName + ".environmentMap")
bgDict['environmentMap2'] = pm.attrColorSliderGrp(label="Environment Map 2", at=self.renderGlobalsNodeName + ".environmentMap2")
pm.separator()
bgDict['sunDir'] = pm.attrFieldGrp(label="Sun Direction:", at=self.renderGlobalsNodeName + ".sunDir")
bgDict['zenithDir'] = pm.attrFieldGrp(label="Zenith Direction:", at=self.renderGlobalsNodeName + ".zenithDir")
bgDict['orientation'] = pm.attrFieldGrp(label="Orientation:", at=self.renderGlobalsNodeName + ".orientation")
pm.separator()
bgDict['skyGamma'] = pm.floatFieldGrp(label="Sky Gamma:", numberOfFields=1)
pm.connectControl(bgDict['skyGamma'], self.renderGlobalsNodeName + ".skyGamma", index=2)
bgDict['groundGamma'] = pm.floatFieldGrp(label="Ground Gamma:", numberOfFields=1)
pm.connectControl(bgDict['groundGamma'], self.renderGlobalsNodeName + ".groundGamma", index=2)
bgDict['turbidity'] = pm.floatFieldGrp(label="Turbidity:", numberOfFields=1)
pm.connectControl(bgDict['turbidity'], self.renderGlobalsNodeName + ".turbidity", index=2)
bgDict['exposure'] = pm.floatFieldGrp(label="Exposure:", numberOfFields=1)
pm.connectControl(bgDict['exposure'], self.renderGlobalsNodeName + ".exposure", index=2)
bgDict['sunIntensity'] = pm.floatFieldGrp(label="Sun Intensity:", numberOfFields=1)
pm.connectControl(bgDict['sunIntensity'], self.renderGlobalsNodeName + ".sunIntensity", index=2)
pm.separator()
bgDict['solidAngle'] = pm.floatFieldGrp(label="Solid Angle:", numberOfFields=1)
pm.connectControl(bgDict['solidAngle'], self.renderGlobalsNodeName + ".solidAngle", index=2)
bgDict['sunSpotAngle'] = pm.floatFieldGrp(label="Sun Spot Angle:", numberOfFields=1)
pm.connectControl(bgDict['sunSpotAngle'], self.renderGlobalsNodeName + ".sunSpotAngle", index=2)
with pm.frameLayout(label="Environment", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
attr = pm.Attribute(self.renderGlobalsNodeName + ".environmentType")
ui = pm.attrEnumOptionMenuGrp(label="Environment Type", at=self.renderGlobalsNodeName + ".environmentType", ei=self.getEnumList(attr))
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
self.KrayRendererUIUpdateCallback("environment")
self.KrayRendererUIUpdateCallback("background")
pm.scriptJob(attributeChange=[self.renderGlobalsNode.backgroundType, pm.Callback(self.KrayRendererUIUpdateCallback, "background")])
pm.scriptJob(attributeChange=[self.renderGlobalsNode.environmentType, pm.Callback(self.KrayRendererUIUpdateCallback, "environment")])
def KrayEnvironmentUpdateTab(self):
log.debug("KrayEnvironmentUpdateTab()")
def KrayPhotonsCreateTab(self):
log.debug("KrayPhotonsCreateTab()")
self.createGlobalsNode()
photonsDict = {}
self.rendererTabUiDict['photons'] = photonsDict
causticDict = {}
self.rendererTabUiDict['caustic'] = causticDict
parentForm = pm.setParent(query=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "PhotonsScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Photons", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
ui = pm.floatFieldGrp(label="GI Resolution:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".giResolution", index=2)
ui = pm.checkBoxGrp(label="GI Resolution Auto:")
pm.connectControl(ui, self.renderGlobalsNodeName + ".giResolutionAuto", index=2)
ui = pm.checkBoxGrp(label="Preview Photons:")
pm.connectControl(ui, self.renderGlobalsNodeName + ".previewPhotons", index=2)
attr = pm.Attribute(self.renderGlobalsNodeName + ".photonMapType")
ui = pm.attrEnumOptionMenuGrp(label="Photon Map Type", at=self.renderGlobalsNodeName + ".photonMapType", ei=self.getEnumList(attr))
ui = pm.intFieldGrp(label="Photons:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonCount", index=2)
ui = pm.floatFieldGrp(label="Power:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonPower", index=2)
ui = pm.floatFieldGrp(label="NBlur:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonNBlur", index=2)
ui = pm.floatFieldGrp(label="Precache Dist:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonPrecacheDist", index=2)
ui = pm.floatFieldGrp(label="Precache Blur:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonPrecacheBlur", index=2)
pm.separator()
ui = pm.checkBoxGrp(label="Auto Photons:")
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonUseAutoPhotons", index=2)
ui = pm.floatFieldGrp(label="Auto Photons Low:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonAutoPhotonsLow", index=2)
ui = pm.floatFieldGrp(label="Auto Photons High:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonAutoPhotonsHigh", index=2)
ui = pm.floatFieldGrp(label="Auto Photons Steps:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".photonAutoPhotonsSteps", index=2)
with pm.frameLayout(label="Caustics", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
ui = pm.checkBoxGrp(label="Add To Lightmap:")
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsAddToLightmap", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Caustic Photons:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsCount", index=2)
ui = pm.floatFieldGrp(label="Caustic Power:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsPower", index=2)
ui = pm.floatFieldGrp(label="Caustic NBlur:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsNBlur", index=2)
pm.separator()
ui = pm.checkBoxGrp(label="Caustic Auto Photons:")
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsUseAutoPhotons", index=2)
ui = pm.floatFieldGrp(label="Caustic Auto Low:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsAutoPhotonsLow", index=2)
ui = pm.floatFieldGrp(label="Caustic Auto High:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsAutoPhotonsHigh", index=2)
ui = pm.floatFieldGrp(label="Caustic Auto Steps:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".causticsAutoPhotonsSteps", index=2)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
def KrayPhotonsUpdateTab(self):
log.debug("KrayPhotonsUpdateTab()")
def KrayFinalGatheringCreateTab(self):
log.debug("KrayFinalGatheringCreateTab()")
self.createGlobalsNode()
parentForm = pm.setParent(query=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "PhotonsScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Photons frame", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
ui = pm.floatFieldGrp(label="Threshold:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgThreshold", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Min Rays:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgMinRays", index=2)
ui = pm.intFieldGrp(label="Max Rays:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgMaxRays", index=2)
ui = pm.floatFieldGrp(label="Prerender:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgPrerender", index=2)
ui = pm.intFieldGrp(label="Passes:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgPasses", index=2)
pm.separator()
ui = pm.floatFieldGrp(label="Sploth Detect:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgSplotchDetect", index=2)
ui = pm.floatFieldGrp(label="Sensitivity:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgSensitivity", index=2)
pm.separator()
ui = pm.checkBoxGrp(label="FG Reflections:", value1=False)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgReflections", index=2)
ui = pm.checkBoxGrp(label="FG Refractions:", value1=False)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgRefractions", index=2)
pm.separator()
ui = pm.floatFieldGrp(label="Spatial Tolerance:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgSpatialTolerance", index=2)
ui = pm.floatFieldGrp(label="Angular Tolerance:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgAngularTolerance", index=2)
ui = pm.floatFieldGrp(label="FG Min Dist:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgDistMin", index=2)
ui = pm.floatFieldGrp(label="FG Dist Max:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgDistMax", index=2)
ui = pm.floatFieldGrp(label="Density/Brightness:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgBrightness", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Path Passes:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgPathPasses", index=2)
ui = pm.floatFieldGrp(label="Corner Dist:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgCornerDist", index=2)
ui = pm.checkBoxGrp(label="Show Samples:", value1=False)
pm.connectControl(ui, self.renderGlobalsNodeName + ".fgShowSamples", index=2)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
def KrayFinalGatheringUpdateTab(self):
log.debug("KrayFinalGatheringUpdateTab()")
# def KraySamplingCreateTab(self):
# log.debug("KraySamplingCreateTab()")
# self.createGlobalsNode()
# parentForm = pm.setParent(query = True)
# pm.setUITemplate("attributeEditorTemplate", pushTemplate = True)
# scLo = self.rendererName + "PhotonsScrollLayout"
# with pm.scrollLayout(scLo, horizontalScrollBarThickness = 0):
# with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn = True, width = 400):
# with pm.frameLayout(label="Photons frame", collapsable = True, collapse=False):
# ui = pm.checkBoxGrp(label="Dummy:", value1 = False)
# pm.setUITemplate("attributeEditorTemplate", popTemplate = True)
# pm.formLayout(parentForm, edit = True, attachForm = [ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
#
# def KraySamplingUpdateTab(self):
# log.debug("KraySamplingUpdateTab()")
def KrayQualityCreateTab(self):
log.debug("KrayFinalGatheringCreateTab()")
self.createGlobalsNode()
qDict = {}
self.rendererTabUiDict['quality'] = qDict
parentForm = pm.setParent(query=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "PhotonsScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Quality", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayoutA", adjustableColumn=True, width=400):
attr = pm.Attribute(self.renderGlobalsNodeName + ".qLuminosityModel")
ui = pm.attrEnumOptionMenuGrp(label="Luminosity Model", at=self.renderGlobalsNodeName + ".qLuminosityModel", ei=self.getEnumList(attr))
attr = pm.Attribute(self.renderGlobalsNodeName + ".qAreaLights")
ui = pm.attrEnumOptionMenuGrp(label="Area Lights Computation", at=self.renderGlobalsNodeName + ".qAreaLights", ei=self.getEnumList(attr))
attr = pm.Attribute(self.renderGlobalsNodeName + ".qAreaLightVisibility")
ui = pm.attrEnumOptionMenuGrp(label="Area Light Visibility", at=self.renderGlobalsNodeName + ".qAreaLightVisibility", ei=self.getEnumList(attr))
pm.separator()
attr = pm.Attribute(self.renderGlobalsNodeName + ".qOctreeDetail")
ui = pm.attrEnumOptionMenuGrp(label="Octree Detail", at=self.renderGlobalsNodeName + ".qOctreeDetail", ei=self.getEnumList(attr))
# float qLevel; // float
# bool qDoubleSided; // bool
# bool qSpotlightsToArea; // bool
# float qAreaLightsThreshold; // float
# int qAMinRecursion; // int
# int qAMaxRecursion; // int
# float qLinearLightsThreshold; // float
# int qLMinRecursion; // int
# int qLMaxRecursion; // int
# float qLuminosityThreshold; // float
# int qLumMinRays; // int
# int qLumMaxRays; // int
# float qBlurringThreshold; // float
# int qBLumMinRays; // int
# int qBLumMaxRays; // int
# float qBAccuracyLimit; // float
# bool qTraceDirectLightReflections; // bool
#
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
def KrayQualityUpdateTab(self):
log.debug("KrayQualityUpdateTab()")
def KrayRendererCreateTab(self):
log.debug("KrayRendererCreateTab()")
self.createGlobalsNode()
sDict = {}
self.rendererTabUiDict['sampling'] = sDict
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "ScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "ColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Image format", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayoutA", adjustableColumn=True, width=400):
attr = pm.Attribute(self.renderGlobalsNodeName + ".diffuseModel")
ui = pm.attrEnumOptionMenuGrp(label="Diffuse Model", at=self.renderGlobalsNodeName + ".diffuseModel", ei=self.getEnumList(attr))
attr = pm.Attribute(self.renderGlobalsNodeName + ".qLuminosityModel")
ui = pm.attrEnumOptionMenuGrp(label="Luminosity Model", at=self.renderGlobalsNodeName + ".qLuminosityModel", ei=self.getEnumList(attr))
pm.separator()
attr = pm.Attribute(self.renderGlobalsNodeName + ".imageFormat")
ui = pm.attrEnumOptionMenuGrp(label="Image format", at=self.renderGlobalsNodeName + ".imageFormat", ei=self.getEnumList(attr))
attr = pm.Attribute(self.renderGlobalsNodeName + ".bitdepth")
sDict['bitdepth'] = pm.attrEnumOptionMenuGrp(label="Bit depth", at=self.renderGlobalsNodeName + ".bitdepth", ei=self.getEnumList(attr))
sDict['jpgQuality'] = pm.intFieldGrp(label="Jpg Quality:", numberOfFields=1)
pm.connectControl(sDict['jpgQuality'], self.renderGlobalsNodeName + ".jpgQuality", index=2)
with pm.frameLayout(label="Sampling", collapsable=True, collapse=False):
with pm.columnLayout(self.rendererName + "ColumnLayoutA", adjustableColumn=True, width=400):
attr = pm.Attribute(self.renderGlobalsNodeName + ".samplingType")
ui = pm.attrEnumOptionMenuGrp(label="Sampling Type", at=self.renderGlobalsNodeName + ".samplingType", ei=self.getEnumList(attr))
sDict['fullScreenAA'] = pm.checkBoxGrp(label="Full Screen AA:", value1=False)
pm.connectControl(sDict['fullScreenAA'], self.renderGlobalsNodeName + ".fullScreenAA", index=2)
pm.separator()
sDict['gridSize'] = pm.intFieldGrp(label="Grid Size:", numberOfFields=1)
pm.connectControl(sDict['gridSize'], self.renderGlobalsNodeName + ".gridSize", index=2)
sDict['gridRotate'] = pm.checkBoxGrp(label="Grid Rotate:", value1=False)
pm.connectControl(sDict['gridRotate'], self.renderGlobalsNodeName + ".rotateGrid", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Rays:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_rays", index=2)
ui = pm.intFieldGrp(label="Min Rays:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_minRays", index=2)
ui = pm.intFieldGrp(label="Max Rays:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_maxRays", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Edge Thickness:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_thickness", index=2)
ui = pm.floatFieldGrp(label="Edge Absulut:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_edgeAbsolute", index=2)
ui = pm.floatFieldGrp(label="Edge Relative:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_relative", index=2)
pm.separator()
ui = pm.floatFieldGrp(label="Normal Sentitivity:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_normal", index=2)
ui = pm.floatFieldGrp(label="Z Sensitivity:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_z", index=2)
ui = pm.floatFieldGrp(label="Max Brightness:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_overburn", index=2)
ui = pm.floatFieldGrp(label="Threshold:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_threshold", index=2)
pm.separator()
ui = pm.intFieldGrp(label="Upsample:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".aa_upsample", index=2)
ui = pm.floatFieldGrp(label="Mb Subframes:", numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".mb_subframes", index=2)
pm.separator()
attr = pm.Attribute(self.renderGlobalsNodeName + ".pixelOrder")
ui = pm.attrEnumOptionMenuGrp(label="Pixel Order", at=self.renderGlobalsNodeName + ".pixelOrder", ei=self.getEnumList(attr))
with pm.frameLayout(label="Filtering", collapsable=True, collapse=False):
attr = pm.Attribute(self.renderGlobalsNodeName + ".filtertype")
ui = pm.attrEnumOptionMenuGrp(label="Filter Type", at=self.renderGlobalsNodeName + ".filtertype", ei=self.getEnumList(attr))
sDict['filterSize'] = pm.floatFieldGrp(label="Filter Size:", numberOfFields=1)
pm.connectControl(sDict['filterSize'], self.renderGlobalsNodeName + ".filtersize", index=2)
with pm.frameLayout(label="Features", collapsable=True, collapse=False):
sDict['doMb'] = pm.checkBoxGrp(label="Motion Blur:")
pm.connectControl(sDict['doMb'], self.renderGlobalsNodeName + ".doMb", index=2)
sDict['doDof'] = pm.checkBoxGrp(label="Depth Of Field:")
pm.connectControl(sDict['doDof'], self.renderGlobalsNodeName + ".doDof", index=2)
sDict['camSingleSided'] = pm.checkBoxGrp(label="Render Single Sided:")
pm.connectControl(sDict['camSingleSided'], self.renderGlobalsNodeName + ".camSingleSided", index=2)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
self.KrayRendererUpdateTab()
self.KrayRendererUIUpdateCallback("sampling")
pm.scriptJob(attributeChange=[self.renderGlobalsNode.samplingType, pm.Callback(self.KrayRendererUIUpdateCallback, "sampling")])
pm.scriptJob(attributeChange=[self.renderGlobalsNode.filtertype, pm.Callback(self.KrayRendererUIUpdateCallback, "sampling")])
pm.scriptJob(attributeChange=[self.renderGlobalsNode.imageFormat, pm.Callback(self.KrayRendererUIUpdateCallback, "sampling")])
def KrayRendererUIUpdateCallback(self, what=None):
self.createGlobalsNode()
#self.updateEnvironment()
log.debug("KrayRendererUIUpdateCallback(): " + str(what))
if what == "environment":
log.debug("Update environment")
eDict = self.rendererTabUiDict['environment']
if what == "background":
log.debug("Update background")
eDict = self.rendererTabUiDict['background']
eType = self.renderGlobalsNode.backgroundType.get()
for key in eDict:
eDict[key].setEnable(val=False)
if eType == 0: #color
eDict['environmentColor'].setEnable(val=True)
if eType == 1 or eType == 2: #physSky
eDict['sunDir'].setEnable(val=True)
eDict['orientation'].setEnable(val=True)
eDict['turbidity'].setEnable(val=True)
eDict['exposure'].setEnable(val=True)
if eType == 2: #physSky2
eDict['groundAlbedo'].setEnable(val=True)
if eType == 3 or eType == 4: #sky
eDict['gradientHorizon'].setEnable(val=True)
eDict['gradientZenit'].setEnable(val=True)
eDict['nadir'].setEnable(val=True)
eDict['zenithDir'].setEnable(val=True)
if eType == 4: #sky2
eDict['skyGamma'].setEnable(val=True)
eDict['groundGamma'].setEnable(val=True)
if eType == 5 or eType == 6: #bitmap
eDict['environmentMap'].setEnable(val=True)
if eType == 6: #bitmap2
eDict['environmentMap2'].setEnable(val=True)
if eType == 7: #directionsMap
pass
if eType == 8: #lightMap
pass
if eType == 9: #sphericalMap
eDict['environmentMap'].setEnable(val=True)
if what == "sampling":
print "Update sampling"
sDict = self.rendererTabUiDict['sampling']
sType = self.renderGlobalsNode.samplingType.get()
if sType != 1: #grid
pm.intFieldGrp(sDict['gridSize'], edit=True, enable=False)
pm.checkBoxGrp(sDict['gridRotate'], edit=True, enable=False)
else:
pm.intFieldGrp(sDict['gridSize'], edit=True, enable=True)
pm.checkBoxGrp(sDict['gridRotate'], edit=True, enable=True)
fType = self.renderGlobalsNode.filtertype.get()
if fType in [4, 5, 6]:
pm.floatFieldGrp(sDict['filterSize'], edit=True, enable=False)
else:
pm.floatFieldGrp(sDict['filterSize'], edit=True, enable=True)
iFormat = self.renderGlobalsNode.imageFormat.get()
if iFormat in [2, 3]:
pm.attrEnumOptionMenuGrp(sDict['bitdepth'], edit=True, enable=True)
else:
pm.attrEnumOptionMenuGrp(sDict['bitdepth'], edit=True, enable=False)
if iFormat == 1:
pm.intFieldGrp(sDict['jpgQuality'], edit=True, enable=True)
else:
pm.intFieldGrp(sDict['jpgQuality'], edit=True, enable=False)
def KrayRendererUpdateTab(self, dummy=None):
self.createGlobalsNode()
#self.updateEnvironment()
log.debug("KrayRendererUpdateTab()")
sDict = self.rendererTabUiDict['sampling']
sType = self.renderGlobalsNode.samplingType.get()
if sType != 1: #grid
pm.intFieldGrp(sDict['gridSize'], edit=True, enable=False)
pm.checkBoxGrp(sDict['gridRotate'], edit=True, enable=False)
else:
pm.intFieldGrp(sDict['gridSize'], edit=True, enable=True)
pm.checkBoxGrp(sDict['gridRotate'], edit=True, enable=True)
fType = self.renderGlobalsNode.filtertype.get()
if fType in [4, 5, 6]:
pm.floatFieldGrp(sDict['filterSize'], edit=True, enable=False)
else:
pm.floatFieldGrp(sDict['filterSize'], edit=True, enable=True)
iFormat = self.renderGlobalsNode.imageFormat.get()
if iFormat in [2, 3]:
pm.attrEnumOptionMenuGrp(sDict['bitdepth'], edit=True, enable=True)
else:
pm.attrEnumOptionMenuGrp(sDict['bitdepth'], edit=True, enable=False)
if iFormat == 1:
pm.intFieldGrp(sDict['jpgQuality'], edit=True, enable=True)
else:
pm.intFieldGrp(sDict['jpgQuality'], edit=True, enable=False)
def xmlFileBrowse(self, args=None):
filename = pm.fileDialog2(fileMode=0, caption="Kray Export File Name")
if len(filename) > 0:
print "Got filename", filename
self.rendererTabUiDict['xml']['xmlFile'].setText(filename[0])
def dirBrowse(self, args=None):
dirname = pm.fileDialog2(fileMode=3, caption="Select dir")
if len(dirname) > 0:
self.rendererTabUiDict['opti']['optiField'].setText(dirname[0])
def KrayTranslatorCreateTab(self):
log.debug("KrayTranslatorCreateTab()")
self.createGlobalsNode()
parentForm = pm.setParent(query=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
scLo = self.rendererName + "TrScrollLayout"
with pm.scrollLayout(scLo, horizontalScrollBarThickness=0):
with pm.columnLayout(self.rendererName + "TrColumnLayout", adjustableColumn=True, width=400):
with pm.frameLayout(label="Translator", collapsable=True, collapse=False):
attr = pm.Attribute(self.renderGlobalsNodeName + ".translatorVerbosity")
ui = pm.attrEnumOptionMenuGrp(label="Translator Verbosity", at=self.renderGlobalsNodeName + ".translatorVerbosity", ei=self.getEnumList(attr))
with pm.frameLayout(label="Kray Scene export", collapsable=True, collapse=False):
ui = pm.checkBoxGrp(label="Export Scene File:", value1=False)
pm.connectControl(ui, self.renderGlobalsNodeName + ".exportSceneFile", index=2)
xmlDict = {}
self.rendererTabUiDict['xml'] = xmlDict
with pm.rowColumnLayout(nc=3, width=120):
pm.text(label="SceneFileName:", width=60, align="right")
defaultXMLPath = pm.workspace.path + "/" + pm.sceneName().basename().split(".")[0] + ".kray"
xmlDict['xmlFile'] = pm.textField(text=defaultXMLPath, width=60)
pm.symbolButton(image="navButtonBrowse.png", c=self.xmlFileBrowse)
pm.connectControl(xmlDict['xmlFile'], self.renderGlobalsNodeName + ".exportSceneFileName", index=2)
with pm.frameLayout(label="Optimize Textures", collapsable=True, collapse=False):
with pm.rowColumnLayout(nc=3, width=120):
optiDict = {}
# pm.text(label="OptimizedTex Dir:", width = 60, align="right")
# self.rendererTabUiDict['opti'] = optiDict
# pm.symbolButton(image="navButtonBrowse.png", c=self.dirBrowse)
# optiDict['optiField'] = pm.textField(text = self.renderGlobalsNode.optimizedTexturePath.get(), width = 60)
# pm.connectControl(optiDict['optiField'], self.renderGlobalsNodeName + ".optimizedTexturePath", index = 2 )
with pm.frameLayout(label="Additional Settings", collapsable=True, collapse=False):
ui = pm.floatFieldGrp(label="Scene scale:", value1=1.0, numberOfFields=1)
pm.connectControl(ui, self.renderGlobalsNodeName + ".sceneScale", index=2)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(parentForm, edit=True, attachForm=[ (scLo, "top", 0), (scLo, "bottom", 0), (scLo, "left", 0), (scLo, "right", 0) ])
def KrayTranslatorUpdateTab(self):
log.debug("KrayTranslatorUpdateTab()")
def createImageFormats(self):
self.createGlobalsNode()
iList = self.renderGlobalsNode.imageFormat.getEnums()
self.imageFormats = []
self.imageFormats.extend(iList)
def registerNodeExtensions(self):
"""Register Kray specific node extensions. e.g. camera type, diaphram_blades and others
"""
pass
def setImageName(self):
self.renderGlobalsNode.basePath.set(pm.workspace.path)
self.renderGlobalsNode.imagePath.set(pm.workspace.path + pm.workspace.fileRules['images'])
imageName = pm.sceneName().basename().replace(".ma", "").replace(".mb", "")
# check for mayabatch name like sceneName_number
numberPart = imageName.split("__")[-1]
try:
number = int(numberPart)
if number > 999:
imageName = imageName.replace("__" + numberPart, "")
except:
pass
self.renderGlobalsNode.imageName.set(imageName)
def renderProcedure(self, width, height, doShadows, doGlow, camera, options):
log.debug("renderProcedure")
self.createGlobalsNode()
self.preRenderProcedure()
self.setImageName()
if pm.about(batch=True):
pm.mayatokray()
else:
pm.mayatokray(width=width, height=height, camera=camera)
self.postRenderProcedure()
def startIprRenderProcedure(self, editor, resolutionX, resolutionY, camera):
self.ipr_isrunning = True
log.debug("startIprRenderProcedure")
print "startIprRenderProcedure", editor, resolutionX, resolutionY, camera
self.createGlobalsNode()
self.preRenderProcedure()
self.setImageName()
pm.mayatokray(width=resolutionX, height=resolutionY, camera=camera, startIpr=True)
self.postRenderProcedure()
def stopIprRenderProcedure(self):
self.ipr_isrunning = False
log.debug("stopIprRenderProcedure")
pm.mayatokray(stopIpr=True)
self.postRenderProcedure()
def preRenderProcedure(self):
self.createGlobalsNode()
if self.renderGlobalsNode.threads.get() == 0:
#TODO this is windows only, search for another solution...
numThreads = int(os.environ['NUMBER_OF_PROCESSORS'])
self.renderGlobalsNode.threads.set(numThreads)
# if not self.renderGlobalsNode.optimizedTexturePath.get() or len(self.renderGlobalsNode.optimizedTexturePath.get()) == 0:
# optimizedPath = pm.workspace.path / pm.workspace.fileRules['renderData'] / "optimizedTextures"
# if not os.path.exists(optimizedPath):
# optimizedPath.makedirs()
# self.renderGlobalsNode.optimizedTexturePath.set(str(optimizedPath))
#
# #craete optimized exr textures
# optimizeTextures.preRenderOptimizeTextures(optimizedFilePath = self.renderGlobalsNode.optimizedTexturePath.get())
def postRenderProcedure(self):
pass
# optimizeTextures.postRenderOptimizeTextures()
def afterGlobalsNodeReplacement(self):
log.debug("afterGlobalsNodeReplacement")
self.rendererTabUiDict = {}
"""
This procedure loads all AETemplates that are loaceted in the AETemplates module.
Normally if you load pymel, it automatically loads the templates but only the ones it finds in the
very first AETemplates directory. If you have several OpenMaya renderers loaded or if you have your own
AETemplates directory, the automatic loading will not work. So I replace it with this procedure.
"""
def loadAETemplates():
rendererName = "Kray"
aeDir = path.path(__file__).dirname() + "/" + rendererName +"/AETemplate/"
for d in aeDir.listdir("*.py"):
if d.endswith("Template.py"):
templateName = d.basename().replace(".py", "")
pythonCommand = "import {1}.AETemplate.{0}".format(templateName, rendererName)
melCommand = 'python("{0}");'.format(pythonCommand)
#log.debug("load aeTemplate: " + templateName + " : " + melCommand)
pm.mel.eval(melCommand)
def theRenderer():
return KrayRenderer.theRenderer()
def initRenderer():
try:
log.debug("Init renderer Kray")
theRenderer().registerRenderer()
loadAETemplates()
except:
traceback.print_exc(file=sys.__stderr__)
log.error("Init renderer Kray FAILED")
def unregister():
theRenderer().unRegisterRenderer()
log.debug("Unregister done")
def uiCallback(what=None):
theRenderer().KrayRendererUIUpdateCallback(what)
| {
"content_hash": "de290f4fd998316af80044ef2bf8035c",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 169,
"avg_line_length": 63.3168908819133,
"alnum_prop": 0.5953162255955051,
"repo_name": "haggi/OpenMaya",
"id": "54fbdc162d09706f807d0d14ada0a50c75ee878b",
"size": "42359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mayaToKray/mtkr_devmodule/scripts/mtkr_initialize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "5333"
},
{
"name": "Batchfile",
"bytes": "587"
},
{
"name": "C",
"bytes": "246300"
},
{
"name": "C++",
"bytes": "4178594"
},
{
"name": "Mathematica",
"bytes": "12660820"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "1583249"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feed', '0005_auto_20160418_0325'),
]
operations = [
migrations.AlterField(
model_name='feed',
name='feedurl',
field=models.URLField(unique=True),
),
]
| {
"content_hash": "794294e88a49790daec148278fbfae9f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 47,
"avg_line_length": 20.61111111111111,
"alnum_prop": 0.5849056603773585,
"repo_name": "kiwiheretic/logos-v2",
"id": "99d29fc689dd7326ff2c031de5892c0ff50539b9",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feed/migrations/0006_auto_20160418_0337.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "39336"
},
{
"name": "HTML",
"bytes": "90623"
},
{
"name": "JavaScript",
"bytes": "2169514"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Python",
"bytes": "610582"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "5552"
}
],
"symlink_target": ""
} |
'test unilint cli'
import unittest
from mock import patch
from unilint.unilint_main import register_formatter, register_plugin, \
FORMATTERS, PLUGINS, resolve_plugins
from unilint.unilint_main import extend_maybe, order_issues, remove_duplicates
from mock import Mock
# pylint: disable=R0904,C0111
class HelperFunctionsTest(unittest.TestCase):
def test_register_formatter(self):
def foo_fun():
pass
register_formatter('foo', foo_fun)
self.assertTrue('foo' in FORMATTERS)
self.assertEqual(foo_fun, FORMATTERS['foo'])
def test_register_plugin(self):
with patch('unilint.unilint_plugin.UnilintPlugin') as plugin:
plugin.get_id.return_value = "bar"
plugin.get_depends.return_value = []
register_plugin(plugin)
self.assertTrue('bar' in PLUGINS)
self.assertEqual(plugin, PLUGINS['bar'])
def test_extend_maybe_list(self):
list1 = [1]
extend_maybe(list1, None)
self.assertEqual([1], list1)
extend_maybe(list1, [])
self.assertEqual([1], list1)
extend_maybe(list1, [2])
self.assertEqual([1, 2], list1)
def test_extend_maybe_dict(self):
list1 = {1: [2]}
extend_maybe(list1, None)
self.assertEqual({1: [2]}, list1)
extend_maybe(list1, {})
self.assertEqual({1: [2]}, list1)
extend_maybe(list1, {3: [4]})
self.assertEqual({1: [2], 3: [4]}, list1)
extend_maybe(list1, {1: [5]})
self.assertEqual({1: [2, 5], 3: [4]}, list1)
def test_resolve_plugins(self):
with patch('unilint.unilint_plugin.UnilintPlugin') as plugin:
with patch('unilint.unilint_plugin.UnilintPlugin') as plugin2:
plugin.get_id.return_value = "bar1"
plugin.get_depends.return_value = []
register_plugin(plugin)
plugin2.get_id.return_value = "pop1"
plugin2.get_depends.return_value = []
register_plugin(plugin2)
self.assertEqual(1, len(resolve_plugins('bar1', '')))
self.assertEqual(2, len(resolve_plugins('bar1,pop1', '')))
self.assertFalse('bar1' in resolve_plugins(None, 'bar1'))
self.assertTrue('pop1' in resolve_plugins(None, 'bar1'))
def test_resolve_plugins_depends(self):
with patch('unilint.unilint_plugin.UnilintPlugin') as plugin:
with patch('unilint.unilint_plugin.UnilintPlugin') as plugin2:
plugin.get_id.return_value = "bar2"
register_plugin(plugin)
plugin2.get_id.return_value = "pop2"
register_plugin(plugin2)
plugin2.get_depends.return_value = ["bar2"]
self.assertEqual(2, len(resolve_plugins('pop2', '')))
def test_order_issues(self):
with patch('unilint.issue.Issue') as issue1:
with patch('unilint.issue.Issue') as issue2:
issue1.path = "aaa"
issue1.line_number_start = 2
issue2.path = "aaa"
issue2.line_number_start = 1
self.assertEqual([issue2, issue1],
order_issues([issue1, issue2]))
self.assertEqual([issue2, issue1],
order_issues([issue2, issue1]))
issue1.path = "aaa"
issue1.line_number_start = 1
issue1.line_position = 2
issue2.path = "aaa"
issue2.line_number_start = 1
issue2.line_position = 1
self.assertEqual([issue2, issue1],
order_issues([issue1, issue2]))
self.assertEqual([issue2, issue1],
order_issues([issue2, issue1]))
def test_remove_duplicates(self):
self.assertEqual([], remove_duplicates([]))
issue1 = Mock()
issue2 = Mock()
issue1.path = '/filename'
issue2.path = '/filename'
issue1.severity = 10
issue2.severity = 10
issue1.line_number_start = None
issue2.line_number_start = None
issue1.line_position = None
issue2.line_position = None
issue1.message = 'foo'
issue2.message = 'bar'
self.assertEqual([issue1, issue2], remove_duplicates([issue1, issue2]))
issue1.line_number_start = 42
issue2.line_number_start = 42
issue1.message = 'line too long'
issue2.message = 'unused import foo'
self.assertEqual([issue1, issue2], remove_duplicates([issue1, issue2]))
issue1.line_number_start = 42
issue2.line_number_start = 42
issue1.line_position = -1
issue2.line_position = -1
issue1.message = 'unused import bar'
issue2.message = 'module bar not used'
self.assertEqual([issue1], remove_duplicates([issue1, issue2]))
issue1.line_number_start = 42
issue2.line_number_start = 42
issue1.line_position = 12
issue2.line_position = 28
issue1.message = 'unused import bar'
issue2.message = 'module foo not used'
self.assertEqual([issue1, issue2], remove_duplicates([issue1, issue2]))
issue1.line_number_start = 32
issue2.line_number_start = 32
issue1.line_position = 80
issue2.line_position = -1
issue1.message = 'line too long (92 characters)'
issue2.message = 'Line too long (92/80)'
self.assertEqual([issue1], remove_duplicates([issue1, issue2]))
issue1.line_number_start = 32
issue2.line_number_start = 33
issue1.line_position = 80
issue2.line_position = -1
issue1.message = 'line too long (92 characters)'
issue2.message = 'Line too long (92/80)'
self.assertEqual([issue1, issue2], remove_duplicates([issue1, issue2]))
| {
"content_hash": "0312b79e709c12bdb5c90c6e81d0a53a",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 41.53846153846154,
"alnum_prop": 0.5786195286195286,
"repo_name": "tkruse/unilint",
"id": "f97e18ba94fd8dbb967ff9dc8284c692be41d20d",
"size": "5940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_unilint_main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "125362"
}
],
"symlink_target": ""
} |
from pprint import pprint
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
symbol = 'ETH/BTC'
exchange = ccxt.poloniex({
'enableRateLimit': True, # or .enableRateLimit = True later
})
# print 10 times with appropriate delay
for i in range(0, 10):
print('--------------------------------------------------------------------')
ticker = exchange.fetch_ticker(symbol)
ticker = exchange.omit(ticker, 'info')
pprint(ticker)
| {
"content_hash": "e6da91af4f376935d7a7180cfed6302b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 24.434782608695652,
"alnum_prop": 0.603202846975089,
"repo_name": "tritoanst/ccxt",
"id": "e15a5a254ac420b6733bca842c6b91db06b1e5ab",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/py/basic-rate-limiting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3955653"
},
{
"name": "PHP",
"bytes": "783191"
},
{
"name": "Python",
"bytes": "680573"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
} |
from asposebarcode import Settings
from com.aspose.barcode import BarCodeBuilder
from com.aspose.barcode import Symbology
class SetAspectRatio:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWith2DBarcodes/Utility2DBarcodeFeatures/SetAspectRatio/'
# Instantiate barcode object
builder = BarCodeBuilder()
symbology= Symbology
builder.setSymbologyType(symbology.Pdf417)
builder.setCodeText("1234567890")
# Set Aspect Ratio to 3:2 or 1.5
builder.setAspectRatio(1.5)
# Save the image
builder.save(dataDir + "SetAspectRatio.jpg")
# Display Status
print "Set Aspect Ratio Successfully."
if __name__ == '__main__':
SetAspectRatio() | {
"content_hash": "0089813d9c717b8433b208cde9b8f1ea",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 101,
"avg_line_length": 26.379310344827587,
"alnum_prop": 0.661437908496732,
"repo_name": "aspose-barcode/Aspose.BarCode-for-Java",
"id": "52d4999c171f5fdcab11633c28ca0220c385f244",
"size": "765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWith2DBarcodes/Utility2DBarcodeFeatures/SetAspectRatio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7424"
},
{
"name": "Java",
"bytes": "392091"
},
{
"name": "PHP",
"bytes": "53227"
},
{
"name": "Python",
"bytes": "42629"
},
{
"name": "Ruby",
"bytes": "47116"
}
],
"symlink_target": ""
} |
import numpy as np
from abbrator import Abbrator
import pdb
class Letterman(object):
def __init__(self, ideogram, karyotype):
"""
self.karyotype
Abbrator
"""
self.ideogram= ideogram
self.karyotype= karyotype
self.init_params()
def define_params(self):
"""
@char_step_size: length/step unit of a character on the circos circle
@threshold: abundance threshold which determines the color of the pathway name.
@coors_between_letters: space between letters. check the unit of this: TODO!!!
@total_coordinate_len: total number of coordinates on circle
@coor_step_size: number of units that one coordinate covers
"""
pass
def calculate_font_color_abundance_threshold(self):
total_abundance_value= self.ideogram.get_total_value()
### TODO: check the calculation of quartiles
values= sorted([chrom.get_unique_total_value() for chrom in self.ideogram.chromosomes])
## yellow to red abundance threshold
## white-yellow colored pathways are more visible with darker letters
## red-dark red colored pathways are more visible with white ltters
self.threshold1= np.percentile(values, 60)
self.threshold2= np.percentile(values, 90)
def init_params(self):
"""
ideogram-wise parameters.
"""
self.coor_step_size= 5
self.calculate_font_color_abundance_threshold()
# total number of features/coordinates at hand
total_coordinate_len= len(self.ideogram)
# total space for writing all pathway names
total_ideogram_step_size= total_coordinate_len * self.coor_step_size
### broadest level will fit more letters.
#### calculate this by dividing the total space by char_size/len TODO
#### define the length of a character by finding how man units the set font
# corresponds to.
#### TODO this will be changed. instead of setting total letters before hadn
# we will set the the char_len before hand since we can have and idea of it
# through the font size.
if len(self.ideogram) < 500:
n_total_letters= 135
elif len(self.ideogram) > 500 and len(self.ideogram) < 2000:
n_total_letters= 180
elif len(self.ideogram) > 2000:
n_total_letters= 250
self.char_step_size= int(total_ideogram_step_size / n_total_letters)
def yield_text(self, chrom):
"""
chromosome-wise parameters
"""
name= chrom.name
n_coordinates= len(chrom)
#####
total_chromosome_step_size = n_coordinates * self.coor_step_size
#####
n_allowed_chars = int(np.floor(total_chromosome_step_size / self.char_step_size))
abbr_pathway= Abbrator(name, n_allowed_chars).abbr
###
chromosome_karyotype_name= chrom.get_coordinates()[0].get_name_by_level(1).strip()
###
### store abbreviations for each pathway in a dict
#self.abbreviations[name] = abbr_pathway
#### set the font color
pathway_abundance= chrom.get_unique_total_value()
if pathway_abundance < self.threshold1 :
color = "dgrey"
elif pathway_abundance >= self.threshold1 and pathway_abundance < self.threshold2:
color= "vdgrey"
else:
color= "white"
options= "color=%s" %color
####
### align the pathway name center
free_space= total_chromosome_step_size - (len(abbr_pathway) * self.char_step_size)
free_coors= free_space / self.coor_step_size
### free space is from both sides
init_coor= free_coors / 2
### TODO: check why this can be below 0.
if init_coor < 0:
pdb.set_trace()
init_coor= 0
### we give the blank penalty because we dont wanna move three step sizes for the character
# after the space character but a max of one step size.
chrom_blank_penalty= 0
for i in range(len(abbr_pathway)):
coor_index= int(init_coor) + int(i * self.char_step_size / self.coor_step_size) -1
#if "HD" in abbr_pathway:
# pdb.set_trace()
#if "ENE" in abbr_pathway:
# pdb.set_trace()
letter= abbr_pathway[i]
if letter != " ":
### we dont wanna write dot(.)s on circos plot.
penalty= np.floor(chrom_blank_penalty * self.char_step_size / self.coor_step_size / 2)
coor_index -= int(penalty)
if coor_index < 0: coor_index= 0
coor= chrom.get_coordinates()[coor_index]
crs= coor.get_coordinate()
startCoor= crs[0]
line = [self.karyotype[chromosome_karyotype_name], str(startCoor),
str(startCoor + int(self.char_step_size)), letter, options]
yield "\t".join(line)+'\n'
else:
chrom_blank_penalty+=1
def trash_note(self):
#avg= float(total_value) / len(self)
### avg does not work in our case. check what are the light
### colors and what are the dark colors in the circos coloring
### scheme. in worst case, color dark if the abundance level s
### lower than 2/3 instead of averadge.
### below parameters are related to the karyotype size!!
### TODO adjust them accordingly!!!!
### factors: directionality, color
###
#" current problems:
# 1 highlights and text coordinates do not match with the heatmap coordinates. check it out
# 2 spaces between words should not take 3 times the siz of an empty character.
# 3 ....
#lenCharCoors= totalLetters / float(total_coordinate_len)
#lenCharCoors= 2
#totalLetters= 300
#lenCharCoors= 3
#totalLetters= 200
#lenCharCoors= 3
#allowedCharLen = int(coordinateLen * lenCharCoors)
## below options did not work
#option2= "label_rotate=no"
#option3= "label_parallel=no"
#options= ",".join([option1, option2, option3])
#free_char_cpace= allowed_char_len - len(abbr_pathway.replace(' ','').replace('.',''))
#freeCharCoors= free_space * lenCharCoors * 5
#textCoors= int(len(abbr_pathway.replace(' ','')) * lenCharCoors)
#except:
# pdb.set_trace()
#penalty = (blank_penalty * int((lenCharCoors+1)*5)) - int(blank_penalty* lenCharCoors*5)
pass
| {
"content_hash": "8d360c116c8a555faea8c461f46637df",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 109,
"avg_line_length": 30.406639004149376,
"alnum_prop": 0.5402565502183406,
"repo_name": "ecotox/pacfm",
"id": "6f9230ea864ad106d9fa1791c767469fbc54697b",
"size": "7328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pacfm/model/helper/letterman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "96986"
},
{
"name": "C",
"bytes": "2253401"
},
{
"name": "Java",
"bytes": "28647"
},
{
"name": "Makefile",
"bytes": "88653"
},
{
"name": "Matlab",
"bytes": "14547"
},
{
"name": "Objective-C",
"bytes": "1061"
},
{
"name": "OpenEdge ABL",
"bytes": "99470"
},
{
"name": "Pascal",
"bytes": "34142"
},
{
"name": "Perl",
"bytes": "705775"
},
{
"name": "Python",
"bytes": "224920"
},
{
"name": "Shell",
"bytes": "17458"
}
],
"symlink_target": ""
} |
import datetime
import logging
import os
import re
import html5lib
import tornado.web
import tornado.wsgi
from tornado.web import url
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext.webapp.util import run_wsgi_app
import forms
import models
import tasks
import uimodules
# Constants
IS_DEV = os.environ['SERVER_SOFTWARE'].startswith('Dev') # Development server
class Application(tornado.wsgi.WSGIApplication):
def __init__(self):
handlers = [
url(r'/', IndexHandler, name='index'),
url(r'/mine', HomeHandler, name='home'),
url(r'/new', NewBookmarkHandler, name='new_bookmark'),
url(r'/bookmarks/([^/]+)', ListBookmarksHandler, name='list'),
url(r'/edit', EditBookmarkHandler, name='edit'),
url(r'/update', UpdateBookmarkHandler, name='update'),
(r'/upload', UploadHandler),
(r'/later', ReadLaterHandler),
(r'/autocomplete', AutocompleteHandler),
# Task handlers
(r'/tasks/create_compute_tags', CreateComputeTagsTasksHandler),
(r'/tasks/create_check_bookmarks', CreateCheckBookmarksTasksHandler),
]
settings = dict(
debug=IS_DEV,
template_path=os.path.join(os.path.dirname(__file__), 'templates'),
xsrf_cookies=True,
cookie_secret="zxccczxi123ijasdj9123asjdzcnjjl0j123jas9d0123asd",
ui_modules=uimodules,
)
tornado.wsgi.WSGIApplication.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
# I don't know why
def initialize(self):
self.xsrf_token
def get_current_user(self):
user = users.get_current_user()
if user:
user.admin = users.is_current_user_admin()
account = models.Account.get_account_for_user(user)
self.current_account = account
return user
def get_login_url(self):
return users.create_login_url(self.request.uri)
def render_string(self, template, **kwds):
return tornado.web.RequestHandler.render_string(
self, template, users=users, IS_DEV=IS_DEV,
current_account=getattr(self, 'current_account', None),
**kwds)
def get_integer(self, name, default, min_value=None, max_value=None):
value = self.get_argument(name, '')
if not isinstance(value, (int, long)):
try:
value = int(value)
except (TypeError, ValueError), err:
value = default
if min_value is not None:
value = max(min_value, value)
if max_value is not None:
value = min(value, max_value)
return value
class IndexHandler(BaseHandler):
def get(self):
self.render('index.html')
class HomeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
if self.current_account.fresh:
self.current_account.fresh = False
self.current_account.put()
self.render('fresh.html')
return
query = models.Bookmark.all() \
.filter('account =', self.current_account) \
.order('-created')
tag = self.get_arguments('tag', None)
if tag is not None:
tag = tag[:2]
for tag in tag:
query = query.filter('tags =', tag)
offset = self.get_integer('offset', 0, 0)
limit = self.get_integer('limit', 25, 1, 100)
bookmarks = query.fetch(limit + 1, offset)
tags = self.current_account.get_popular_tags(20)
self.render('list.html', bookmarks=bookmarks, tags=tags)
class ListBookmarksHandler(BaseHandler):
def get(self, nickname):
account = models.Account.get_account_for_nickname(nickname)
if account is None:
raise tornado.web.HTTPError(404)
if self.current_user and account.key() == self.current_account.key():
query = models.Bookmark.all().filter('account =', self.current_account)
else:
query = models.Bookmark.all() \
.filter('account =', account) \
.filter('is_private =', False)
query = query.order('-created')
# Pagination
offset = self.get_integer('offset', 0, 0)
limit = self.get_integer('limit', 25, 1, 100)
params = {
'limit': limit,
'first': offset + 1,
}
bookmarks = query.fetch(limit + 1, offset)
tags = account.get_popular_tags(20)
self.render('list.html', bookmarks=bookmarks, tags=tags)
class NewBookmarkHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
# Check if popup
is_popup = self.get_argument('p', None) == '1'
is_unread = self.get_argument('unread', None) == '1'
if is_popup:
bookmark = self.current_account.get_bookmark_for_uri(
self.get_argument('uri'))
if bookmark is None:
form = forms.BookmarkForm(self)
else:
self.redirect(self.reverse_url('edit') +
'?&p=1&id=' + bookmark.uri_digest +
'&description=' + self.get_argument('description', ''))
return
else:
form = forms.BookmarkForm()
self.render('bookmark-form.html', form=form, is_popup=is_popup)
@tornado.web.authenticated
def post(self):
is_popup = self.get_argument('p', None) == '1'
form = forms.BookmarkForm(self)
if form.validate():
account = self.current_account
account_key_name = account.key().name()
uri_digest = models.Bookmark.get_digest_for_uri(form.uri.data)
key = '%s:%s' % (account_key_name, uri_digest)
bookmark = models.Bookmark(
key_name=key,
account=self.current_account,
uri_digest=uri_digest,
**form.data)
bookmark.put()
if is_popup:
self.write('<script>window.close()</script>')
else:
self.redirect(self.reverse_url('home'))
else:
self.render('bookmark-form.html', form=form)
class ReadLaterHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
bookmark = self.current_account.get_bookmark_for_uri(
self.get_argument('uri'))
if bookmark is None:
bookmark = models.Bookmark(uri=self.get_argument('uri'))
bookmark.account = self.current_account
bookmark.uri_digest = models.Bookmark.get_digest_for_uri(bookmark.uri)
bookmark.title = self.get_argument('title', bookmark.uri)
bookmark.description = self.get_argument('description', '')
bookmark.key_name = '%s:%s' % (self.current_account.key().name(),
bookmark.uri_digest)
bookmark.is_unread = True
bookmark.put()
self.write('<script>window.blur();window.close()</script>')
class EditBookmarkHandler(BaseHandler):
def get(self):
form = forms.BookmarkForm(obj=self.bookmark)
form.description.data = self.get_argument(
'description', self.bookmark.description)
self.render('bookmark-form.html', form=form)
def post(self):
form = forms.BookmarkForm(self, obj=self.bookmark)
if form.validate():
form.populate_obj(self.bookmark)
self.bookmark.put()
if self.get_argument('p', None):
self.write('<script>window.close()</script>')
else:
self.render('module-bookmark.html', bookmark=self.bookmark)
else:
self.render('bookmark-form.html', form=form)
@tornado.web.authenticated
def prepare(self):
id = self.get_argument('id')
bookmark = self.current_account.get_bookmark_for_digest(id)
if bookmark is None:
raise tornado.web.HTTPError(404)
if bookmark.account.key() != self.current_account.key():
raise tornado.web.HTTPError(403)
self.bookmark = bookmark
class UploadHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.render('upload.html',
upload_url=blobstore.create_upload_url('/upload'))
@tornado.web.authenticated
def post(self):
if IS_DEV:
blob_key = re.findall(r'blob-key="*(\S+)"', self.request.body)[0]
else:
blob_key = re.findall(r'blob-key=(.+)', self.request.body)[0]
new_import = models.Import(account=self.current_account,
blob=blob_key)
new_import.put()
deferred.defer(tasks.ImportBookmarks, new_import.key())
self.redirect(self.reverse_url('home'))
class UpdateBookmarkHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
id = self.get_argument('id')
action = self.get_argument('action')
bookmark = self.current_account.get_bookmark_for_digest(id)
if bookmark is None:
raise tornado.web.HTTPError(404)
if bookmark.account.key() != self.current_account.key():
raise tornado.web.HTTPError(403)
if action == 'star':
bookmark.is_starred = True
elif action == 'unstar':
bookmark.is_starred = False
elif action == 'read':
bookmark.is_unread = False
elif action == 'unread':
bookmark.is_unread = True
bookmark.put()
self.render('module-bookmark.html', bookmark=bookmark)
class AutocompleteHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
q = self.get_argument('q').strip()
if len(q) < 2:
self.finish()
return
tags_cache_key = "%s:tags" % self.current_account.key()
tags = memcache.get(tags_cache_key)
if tags is None:
# TODO What if user has got 1000's of tags?
tags = set([
tag.name
for tag in models.Tag.all().filter('account =', self.current_account)])
if not memcache.add(tags_cache_key, tags):
logging.error("Cannot set account tags in memcache")
records = [tag for tag in tags if tag.startswith(q)]
self.write(dict(records=records))
# Cron handlers
class BaseTaskHandler(BaseHandler):
def initialize(self):
self.application.settings['xsrf_cookies'] = False
class CreateComputeTagsTasksHandler(BaseTaskHandler):
def get(self):
for account in models.Account.all():
deferred.defer(tasks.ComputeTagCounts, account.key())
class CreateCheckBookmarksTasksHandler(BaseTaskHandler):
def get(self):
for account in models.Account.all():
deferred.defer(tasks.CheckBookmarks, account.key())
def main():
run_wsgi_app(Application())
if __name__ == '__main__':
main()
| {
"content_hash": "f283ac8f2b6eb6bb1c7209cfd909e8c3",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 81,
"avg_line_length": 32.078369905956116,
"alnum_prop": 0.6500537476790775,
"repo_name": "haldun/bookmarks-gae",
"id": "8fc55ea858335fbd9775ddc76e98f07aa934d72e",
"size": "10233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37166"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('books.urls', namespace='index')),
url(r'^admin/', include(admin.site.urls)),
url(r'^customadmin/', include('customadmin.urls', namespace='customadmin')),
url(r'^accounts/', include('authentication.urls', namespace='accounts')),
url(r'^', include('books.urls', namespace='books')),
url(r'^people/', include('people.urls', namespace='people')),
url(r'^campaigns/', include('campaigns.urls', namespace='campaigns')),
url(r'^payment/', include('payment.urls', namespace='paygate')),
url(r'^contacts/', include('social_feeds.urls', namespace='social')),
url(r'^invite/', include('contacts.urls', namespace='contacts')),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "130d8a2b34faa0347775fdcce6737b9d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 30.75,
"alnum_prop": 0.6839430894308943,
"repo_name": "agiliq/fundraiser",
"id": "3e856915e19c2e50a4fa82b38da9331d137c566e",
"size": "984",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fund_raiser/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "55823"
},
{
"name": "JavaScript",
"bytes": "3822"
},
{
"name": "Python",
"bytes": "93926"
}
],
"symlink_target": ""
} |
from django_jinja.library import filter
__author__ = 'AlexStarov'
@filter(name='custom_QuerySet_filter', )
def custom_QuerySet_filter(value, variable, operation, value_variable, ):
value = value.filter(is_availability=1, ) # map(variable, operation, value_variable, ), ) # is_availability=1)
return value
| {
"content_hash": "3b271cc6d063364c9f5809f0893bcfb8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 116,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.7169811320754716,
"repo_name": "AlexStarov/Shop",
"id": "c3e72ad1305dcd22b561b975732630d20425ce7b",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/utils/templatetags/custom_QuerySet_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "268281"
},
{
"name": "HTML",
"bytes": "138853"
},
{
"name": "JavaScript",
"bytes": "10629133"
},
{
"name": "PHP",
"bytes": "14"
},
{
"name": "Python",
"bytes": "1532862"
},
{
"name": "Shell",
"bytes": "2089"
}
],
"symlink_target": ""
} |
import sys, time
import atexit
import OpenGL.GL as gl
import OpenGL.GLUT as glut
import key, mouse, event, proxy
import _ctypes
import threading
import traceback
import IPython
if sys.platform in ['linux2', 'darwin']:
import termios
# The one and only window
_window = None
def active_window():
return _window
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
class Window(event.EventDispatcher, Singleton):
# Instance variables accessible only via properties
_fullscreen = False
_visible = False
# Subclasses should update these after relevant events
_mouse_x = 0
_mouse_y = 0
_button = mouse.NONE
_modifiers = None
_mouse_in_window = False
_event_queue = None
_time = None
_width = None
_height = None
_window_id = None
_timer_stack = []
_timer_date = []
_lock = threading.Lock()
_command_queue = []
# Class attributes
_default_width = 640
_default_height = 480
def __init__(self, width=None, height=None, caption=None, visible=True, fullscreen=False):
event.EventDispatcher.__init__(self)
self._event_queue = []
if width and width > 0:
self._width = width
else:
self._width = Window._default_width
if height and height > 0:
self._height = height
else:
self._height = Window._default_height
if caption is None:
caption = sys.argv[0]
self._caption = caption
self._saved_width = self._width
self._saved_height = self._height
if _window is None:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut.GLUT_DOUBLE |
glut.GLUT_RGBA |
glut.GLUT_DEPTH)
self._window_id = glut.glutCreateWindow(self._caption)
glut.glutDisplayFunc(self._display)
glut.glutReshapeFunc(self._reshape)
glut.glutKeyboardFunc(self._keyboard)
glut.glutKeyboardUpFunc(self._keyboard_up)
glut.glutMouseFunc(self._mouse)
glut.glutMotionFunc(self._motion)
glut.glutPassiveMotionFunc(self._passive_motion)
glut.glutVisibilityFunc(self._visibility)
glut.glutEntryFunc(self._entry)
glut.glutSpecialFunc(self._special)
glut.glutSpecialUpFunc(self._special_up)
gl.glClearColor(0,0,0,0)
self._visible = visible
self._time = glut.glutGet(glut.GLUT_ELAPSED_TIME)
if not visible:
glut.glutHideWindow()
else:
glut.glutShowWindow()
self.set_size(self._width, self._height)
screen_width = glut.glutGet(glut.GLUT_SCREEN_WIDTH)
screen_height= glut.glutGet(glut.GLUT_SCREEN_HEIGHT)
glut.glutPositionWindow((screen_width-self._width)//2,
(screen_height-self._height)//2)
self.fullscreen = fullscreen
def _keyboard(self, code, x, y):
symbol = self._keyboard_translate(code)
modifiers = glut.glutGetModifiers()
modifiers = self._modifiers_translate(modifiers)
state= self.dispatch_event('on_key_press', symbol, modifiers)
if not state and symbol == key.ESCAPE:
sys.exit()
def _keyboard_up(self, code, x, y):
modifiers = glut.glutGetModifiers()
self.dispatch_event('on_key_release',
self._keyboard_translate(code),
self._modifiers_translate(modifiers))
def _special(self, code, x, y):
modifiers = glut.glutGetModifiers()
self.dispatch_event('on_key_press',
self._keyboard_translate(code),
self._modifiers_translate(modifiers))
def _special_up(self, code, x, y):
modifiers = glut.glutGetModifiers()
self.dispatch_event('on_key_release',
self._keyboard_translate(code),
self._modifiers_translate(modifiers))
def _modifiers_translate(self, modifiers):
_modifiers = 0
if modifiers & glut.GLUT_ACTIVE_SHIFT:
_modifiers |= key.MOD_SHIFT
if modifiers & glut.GLUT_ACTIVE_CTRL:
_modifiers |= key.MOD_CTRL
if modifiers & glut.GLUT_ACTIVE_ALT:
_modifiers |= key.MOD_ALT
return _modifiers
def _keyboard_translate(self, code):
if getattr(code, 'lower', None):
ascii = ord(code)
else:
ascii = None
if ascii is not None and \
(0x020 <= ascii <= 0x07e):
# (0x020 <= ascii <= 0x040) or (0x05b <= ascii <= 0x07e):
return ascii
elif ascii is not None and (ascii < 0x020 or ascii == 0x07f):
if ascii == 0x07F: return key.BACKSPACE
elif ascii == 0x008: return key.DELETE
elif ascii == 0x009: return key.TAB
elif ascii == 0x00A: return key.LINEFEED
elif ascii == 0x00C: return key.CLEAR
elif ascii == 0x00D: return key.RETURN
elif ascii == 0x018: return key.CANCEL
elif ascii == 0x01B: return key.ESCAPE
elif code==glut.GLUT_KEY_F1: return key.F1
elif code==glut.GLUT_KEY_F2: return key.F2
elif code==glut.GLUT_KEY_F3: return key.F3
elif code==glut.GLUT_KEY_F4: return key.F4
elif code==glut.GLUT_KEY_F5: return key.F5
elif code==glut.GLUT_KEY_F6: return key.F6
elif code==glut.GLUT_KEY_F7: return key.F7
elif code==glut.GLUT_KEY_F8: return key.F8
elif code==glut.GLUT_KEY_F9: return key.F9
elif code==glut.GLUT_KEY_F10: return key.F10
elif code==glut.GLUT_KEY_F11: return key.F11
elif code==glut.GLUT_KEY_F12: return key.F12
elif code==glut.GLUT_KEY_LEFT: return key.LEFT
elif code==glut.GLUT_KEY_UP: return key.UP
elif code==glut.GLUT_KEY_RIGHT: return key.RIGHT
elif code==glut.GLUT_KEY_DOWN: return key.DOWN
elif code==glut.GLUT_KEY_PAGE_UP: return key.PAGEUP
elif code==glut.GLUT_KEY_PAGE_DOWN:return key.PAGEDOWN
elif code==glut.GLUT_KEY_HOME: return key.HOME
elif code==glut.GLUT_KEY_END: return key.END
elif code==glut.GLUT_KEY_INSERT: return key.INSERT
def _display(self):
#self.clear()
self.dispatch_event('on_draw')
self.flip()
def _idle(self):
t = glut.glutGet(glut.GLUT_ELAPSED_TIME)
dt = (t - self._time)/1000.0
self._time = t
self.dispatch_event('on_idle', dt)
def _reshape(self, width, height):
width = glut.glutGet(glut.GLUT_WINDOW_WIDTH)
height = glut.glutGet(glut.GLUT_WINDOW_HEIGHT)
self._width, self._height = width, height
self.dispatch_event('on_resize', self._width, self._height)
#glut.glutPostRedisplay()
def _visibility(self, state):
if state == glut.GLUT_VISIBLE:
self._visible = True
self.dispatch_event('on_show')
glut.glutPostRedisplay()
elif state == glut.GLUT_NOT_VISIBLE:
self._visible = False
self.dispatch_event('on_hide')
def _entry(self, state):
if state == glut.GLUT_ENTERED:
self._mouse_in_window = True
self.dispatch_event('on_mouse_enter')
elif state == glut.GLUT_LEFT:
self._mouse_in_window = False
self.dispatch_event('on_mouse_leave')
def _mouse(self, button, state, x, y):
y = self._height - y
if button == glut.GLUT_LEFT_BUTTON:
button = mouse.LEFT
elif button == glut.GLUT_MIDDLE_BUTTON:
button = mouse.MIDDLE
elif button == glut.GLUT_RIGHT_BUTTON:
button = mouse.RIGHT
if state == glut.GLUT_UP:
self._button = mouse.NONE
self._mouse_x = x
self._mouse_y = y
self.dispatch_event('on_mouse_release', x, y, button)
elif state == glut.GLUT_DOWN:
self._button = button
self._mouse_x = x
self._mouse_y = y
if button == 3:
self._button = mouse.NONE
self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
elif button == 4:
self._button = mouse.NONE
self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
else:
self.dispatch_event('on_mouse_press', x, y, button)
def _motion(self, x, y):
y = self._height - y
dx = x - self._mouse_x
dy = y - self._mouse_y
self._mouse_x = x
self._mouse_y = y
self.dispatch_event('on_mouse_drag', x, y, dx, dy, self._button)
def _passive_motion(self, x, y):
y = self._height - y
dx = x - self._mouse_x
dy = y - self._mouse_y
self._mouse_x = x
self._mouse_y = y
self.dispatch_event('on_mouse_motion', x, y, dx, dy)
def _push(self, obj, args, kwargs):
''' Push a new object call onto the stack '''
class container(object):
def __init__(self):
self.value = None
self.filled = False
def __call__(self, value=None):
self.value = value
self.filled = True
output = container()
self._lock.acquire()
self._command_queue.append((obj, args, kwargs, output))
self._lock.release()
while not output.filled: pass
return output.value
def _pop(self, value):
''' Process one object call from the stack '''
if not len(self._command_queue):
glut.glutTimerFunc(100, self._pop, 0)
return True
self._lock.acquire()
function, args, kwargs, output = self._command_queue.pop(0)
self._lock.release()
try:
result = function(*args,**kwargs)
except:
traceback.print_exc()
result = None
if output:
output(result)
glut.glutTimerFunc(100, self._pop, 0)
glut.glutPostRedisplay()
return True
def mainloop(self, interactive=False, namespace=globals()):
'''Starts main loop
'''
# Start timers
for i in range(len(self._timer_stack)):
def func(index):
handler, fps = self._timer_stack[index]
t = glut.glutGet(glut.GLUT_ELAPSED_TIME)
dt = (t - self._timer_date[index])/1000.0
self._timer_date[index] = t
handler(dt)
glut.glutTimerFunc(int(1000./fps), func, index)
self._timer_date[index] = glut.glutGet(glut.GLUT_ELAPSED_TIME)
fps = self._timer_stack[i][1]
glut.glutTimerFunc(int(1000./fps), func, i)
# Start idle only if necessary
for item in self._event_stack:
if 'on_idle' in item.keys():
glut.glutIdleFunc(self._idle)
self.dispatch_event('on_init')
# Starts non-interactive mode
if not interactive:
glut.glutMainLoop()
sys.exit()
# Starts interactive mode
# Save tty mode on linux/darwin
if sys.platform in ['linux2', 'darwin']:
self.term_state = termios.tcgetattr(sys.stdin)
namespace = namespace.copy()
for key in namespace.keys():
f = namespace[key]
if key[:2] == 'gl' and isinstance(namespace[key], _ctypes.CFuncPtr):
namespace[key] = proxy.Proxy(f,self)
def session_start():
self.shell = IPython.ipapi.make_session(namespace)
self.shell.IP.interact() #mainloop()
sys.exit()
self.session = threading.Thread(target=session_start)
self.session.start()
@atexit.register
def goodbye():
self.shell.IP.ask_exit()
# Restore tty state on linux/darwin
if sys.platform in ['linux2', 'darwin']:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.term_state)
sys.stdout.write('\n')
glut.glutTimerFunc(100, self._pop, 0)
glut.glutMainLoop()
def get_fullscreen(self):
''' Get fullscreen mode '''
return self._fullscreen
def set_fullscreen(self, state):
''' Exit fullscreen mode '''
self._fullscreen = state
if state:
self._saved_width = glut.glutGet(glut.GLUT_WINDOW_WIDTH)
self._saved_height = glut.glutGet(glut.GLUT_WINDOW_HEIGHT)
glut.glutFullScreen()
else:
glut.glutReshapeWindow(self._saved_width, self._saved_height)
def exit(self):
'''Exit mainloop'''
if (glut.glutLeaveMainLoop):
glut.glutLeaveMainLoop()
else:
sys.exit();
def timer(self, *args):
'''Function decorator for a timed handler.
Usage::
win = window.Window()
@win.timer(60)
def timer(dt):
# ...
'''
if len(args) != 1: return
if type(args[0]) not in (int,float): return
fps = args[0]
def decorator(func):
self._timer_stack.append((func, fps))
self._timer_date.append(0)
return func
return decorator
def set_size(self, width, height):
'''Resize the window.
The behaviour is undefined if the window is not resizable, or if
it is currently fullscreen.
The window size does not include the border or title bar.
:Parameters:
`width` : int
New width of the window, in pixels.
`height` : int
New height of the window, in pixels.
'''
glut.glutReshapeWindow(width,height)
def get_size(self):
'''Return the current size of the window.
The window size does not include the border or title bar.
:rtype: (int, int)
:return: The width and height of the window, in pixels.
'''
width = glut.glutGet(glut.GLUT_WINDOW_WIDTH)
height = glut.glutGet(glut.GLUT_WINDOW_HEIGHT)
return width,height
def clear(self):
'''Clear the window.
This is a convenience method for clearing the color and depth
buffer. The window must be the active one (see `switch_to`).
'''
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
def draw(self):
'''Draw the window.
This is a convenience method for forcing a redraw. The window must be
the active one (see `switch_to`).
'''
glut.glutPostRedisplay()
def flip(self):
'''Swap the OpenGL front and back buffers.
Call this method on a double-buffered window to update the
visible display with the back buffer. The contents of the back buffer
is undefined after this operation.
Windows are double-buffered by default. This method is called
automatically by `EventLoop` after the `on_draw` event.
'''
glut.glutSwapBuffers()
def on_resize(self, width, height):
'''A default resize event handler.
This default handler updates the GL viewport to cover the entire
window and sets the ``GL_PROJECTION`` matrix to be orthogonal in
window space. The bottom-left corner is (0, 0) and the top-right
corner is the width and height of the window in pixels.
Override this event handler with your own to create another
projection, for example in perspective.
'''
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
# These are the only properties that can be set
fullscreen = property(get_fullscreen,
set_fullscreen,
doc='''Fullscreen mode.
:type: bool
''')
width = property(lambda self: self.get_size()[0],
lambda self, width: self.set_size(width, self.height),
doc='''The width of the window, in pixels. Read-write.
:type: int
''')
height = property(lambda self: self.get_size()[1],
lambda self, height: self.set_size(self.width, height),
doc='''The height of the window, in pixels. Read-write.
:type: int
''')
Window.register_event_type('on_key_press')
Window.register_event_type('on_key_release')
Window.register_event_type('on_mouse_motion')
Window.register_event_type('on_mouse_drag')
Window.register_event_type('on_mouse_press')
Window.register_event_type('on_mouse_release')
Window.register_event_type('on_mouse_scroll')
Window.register_event_type('on_mouse_enter')
Window.register_event_type('on_mouse_leave')
Window.register_event_type('on_expose')
Window.register_event_type('on_resize')
Window.register_event_type('on_move')
# Window.register_event_type('on_activate')
# Window.register_event_type('on_deactivate')
Window.register_event_type('on_show')
Window.register_event_type('on_hide')
Window.register_event_type('on_draw')
Window.register_event_type('on_idle')
Window.register_event_type('on_init')
_window = Window(1,1,visible=False)
| {
"content_hash": "3b8510df14d2b3b24843771b098a3d1f",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 94,
"avg_line_length": 33.445283018867926,
"alnum_prop": 0.5700665688818685,
"repo_name": "davidcox/glumpy",
"id": "cd6b1f3d4197848c084a98024bdff64e900b4935",
"size": "18111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glumpy/window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "339503"
},
{
"name": "Shell",
"bytes": "4511"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
],
),
]
| {
"content_hash": "112015d82d078ed5df154a1fb2fac57f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 114,
"avg_line_length": 22.789473684210527,
"alnum_prop": 0.5658198614318707,
"repo_name": "talaniz/django-tdd",
"id": "9afd30d093b76001ec456a08cc038046f95e93c7",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lists/migrations/0003_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7486"
},
{
"name": "HTML",
"bytes": "3332"
},
{
"name": "JavaScript",
"bytes": "117937"
},
{
"name": "Python",
"bytes": "33853"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import render
from core.common import *
from reports.forms import ReportForm
def report_form(request):
return render(request, 'reports/form.html', {
'settings': settings,
'user': get_user(request.user),
'form': ReportForm(instance=None, request=request),
})
| {
"content_hash": "84d4f9621032f2fc8cae0fa315af3f3b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 28.76923076923077,
"alnum_prop": 0.7540106951871658,
"repo_name": "PrincessTeruko/TsunArt",
"id": "3cba600ab6a801c1cdb4c9e0e7c56061d28599a3",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reports/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23615"
},
{
"name": "HTML",
"bytes": "34809"
},
{
"name": "JavaScript",
"bytes": "47538"
},
{
"name": "Python",
"bytes": "114688"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
"""MeltingPotEnv as a MultiAgentEnv wrapper to interface with RLLib."""
from typing import Tuple
import dm_env
import dmlab2d
from gym import spaces
from ml_collections import config_dict
import numpy as np
from ray.rllib.agents import trainer
from ray.rllib.env import multi_agent_env
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from examples import utils
from meltingpot.python import substrate
from meltingpot.python.utils.policies import policy
PLAYER_STR_FORMAT = 'player_{index}'
class MeltingPotEnv(multi_agent_env.MultiAgentEnv):
"""An adapter between the Melting Pot substrates and RLLib MultiAgentEnv."""
def __init__(self, env: dmlab2d.Environment):
"""Initializes the instance.
Args:
env: dmlab2d environment to wrap. Will be closed when this wrapper closes.
"""
self._env = env
self._num_players = len(self._env.observation_spec())
self._ordered_agent_ids = [
PLAYER_STR_FORMAT.format(index=index)
for index in range(self._num_players)
]
# RLLib requires environments to have the following member variables:
# observation_space, action_space, and _agent_ids
self._agent_ids = set(self._ordered_agent_ids)
# RLLib expects a dictionary of agent_id to observation or action,
# Melting Pot uses a tuple, so we convert
self.observation_space = self._convert_spaces_tuple_to_dict(
utils.spec_to_space(self._env.observation_spec()),
remove_world_observations=True)
self.action_space = self._convert_spaces_tuple_to_dict(
utils.spec_to_space(self._env.action_spec()))
super().__init__()
def reset(self):
"""See base class."""
timestep = self._env.reset()
return utils.timestep_to_observations(timestep)
def step(self, action):
"""See base class."""
actions = [action[agent_id] for agent_id in self._ordered_agent_ids]
timestep = self._env.step(actions)
rewards = {
agent_id: timestep.reward[index]
for index, agent_id in enumerate(self._ordered_agent_ids)
}
done = {'__all__': timestep.last()}
info = {}
observations = utils.timestep_to_observations(timestep)
return observations, rewards, done, info
def close(self):
"""See base class."""
self._env.close()
def get_dmlab2d_env(self):
"""Returns the underlying DM Lab2D environment."""
return self._env
# Metadata is required by the gym `Env` class that we are extending, to show
# which modes the `render` method supports.
metadata = {'render.modes': ['rgb_array']}
def render(self, mode: str) -> np.ndarray:
"""Render the environment.
This allows you to set `record_env` in your training config, to record
videos of gameplay.
Args:
mode (str): The mode to render with (see
`MeltingPotEnv.metadata["render.modes"]` for supported modes).
Returns:
np.ndarray: This returns a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable for turning
into a video.
"""
observation = self._env.observation()
world_rgb = observation['WORLD.RGB']
# RGB mode is used for recording videos
if mode == 'rgb_array':
return world_rgb
else:
return super().render(mode=mode)
def _convert_spaces_tuple_to_dict(
self,
input_tuple: spaces.Tuple,
remove_world_observations: bool = False) -> spaces.Dict:
"""Returns spaces tuple converted to a dictionary.
Args:
input_tuple: tuple to convert.
remove_world_observations: If True will remove non-player observations.
"""
return spaces.Dict({
agent_id: (utils.remove_world_observations_from_space(input_tuple[i])
if remove_world_observations else input_tuple[i])
for i, agent_id in enumerate(self._ordered_agent_ids)
})
def env_creator(env_config):
"""Outputs an environment for registering."""
env = substrate.build(config_dict.ConfigDict(env_config))
env = MeltingPotEnv(env)
return env
class RayModelPolicy(policy.Policy):
"""Policy wrapping an RLLib model for inference.
Note: Currently only supports a single input, batching is not enabled
"""
def __init__(self,
model: trainer.Trainer,
policy_id: str = DEFAULT_POLICY_ID) -> None:
"""Initialize a policy instance.
Args:
model: An rllib.trainer.Trainer checkpoint.
policy_id: Which policy to use (if trained in multi_agent mode)
"""
self._model = model
self._prev_action = 0
self._policy_id = policy_id
def step(self, timestep: dm_env.TimeStep,
prev_state: policy.State) -> Tuple[int, policy.State]:
"""See base class."""
observations = {
key: value
for key, value in timestep.observation.items()
if 'WORLD' not in key
}
action, state, _ = self._model.compute_single_action(
observations,
prev_state,
policy_id=self._policy_id,
prev_action=self._prev_action,
prev_reward=timestep.reward)
self._prev_action = action
return action, state
def initial_state(self) -> policy.State:
"""See base class."""
self._prev_action = 0
return self._model.get_policy(self._policy_id).get_initial_state()
def close(self) -> None:
"""See base class."""
| {
"content_hash": "8566a5114aaa2eef654ddd21cef166c0",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 80,
"avg_line_length": 31.27485380116959,
"alnum_prop": 0.6630516080777861,
"repo_name": "deepmind/meltingpot",
"id": "9310aaca377d3fa1fa0fecc424425db445977d21",
"size": "5943",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/rllib/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1732"
},
{
"name": "Lua",
"bytes": "674594"
},
{
"name": "Python",
"bytes": "1768669"
},
{
"name": "Shell",
"bytes": "2923"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sfc_models'
copyright = '2017, Brian Romanchuk'
author = 'Brian Romanchuk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'sfc_models v0.2.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sfc_modelsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sfc_models.tex', 'sfc\\_models Documentation',
'Brian Romanchuk', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sfc_models', 'sfc_models Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sfc_models', 'sfc_models Documentation',
author, 'sfc_models', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "1459ae4feb8674e97876dbf5f87540fb",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 80,
"avg_line_length": 28.425076452599388,
"alnum_prop": 0.685529854760624,
"repo_name": "brianr747/SFC_models",
"id": "171ef6ba24ce91873d69b6f891294d380c8d769d",
"size": "9981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "433125"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Count
from ct.models import Response
class CorrectnessMeter(models.Model):
"""User answers correctness meter."""
PARTIALLY_CORRECT_ANSWER_POINT_REDUCTION = 0.9
CORRECT_ANSWER_POINTS = 1
NOT_CORRECT_ANSWER_POINTS = 0
NOT_CORRECT = 'not_correct'
CORRECT = 'correct'
PARTIALLY_CORRECT = 'partially_correct'
CORRECTNESS_CHOICES = (
(CORRECT, 'correct'),
(PARTIALLY_CORRECT, 'partially correct'),
(NOT_CORRECT, 'not correct'),
)
response = models.ForeignKey(Response)
correctness = models.CharField(choices=CORRECTNESS_CHOICES, max_length=25)
points = models.FloatField(default=0)
@classmethod
def get_user_answers_freq(cls, user, correctness=CORRECT):
total_user_answers = CorrectnessMeter.objects.filter(response__author=user)
try:
return total_user_answers.filter(correctness=correctness).count() / float(total_user_answers.count())
except ZeroDivisionError:
return
def __str__(self):
return "{} {} {}".format(self.response.author, self.correctness, self.points)
| {
"content_hash": "db5667e9febf08b0a85d82ae9be55c62",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 113,
"avg_line_length": 34.65714285714286,
"alnum_prop": 0.6826051112943117,
"repo_name": "raccoongang/socraticqs2",
"id": "bb446621fde09672c5587388748d12b95ccb0827",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/grading/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189600"
},
{
"name": "Dockerfile",
"bytes": "580"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "342788"
},
{
"name": "JavaScript",
"bytes": "133425"
},
{
"name": "Makefile",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "1504025"
},
{
"name": "Shell",
"bytes": "1521"
}
],
"symlink_target": ""
} |
"""Implements a resource and task management multiprocessing.Process
based on BigJob.
"""
__author__ = "Ole Weidner"
__email__ = "[email protected]"
__copyright__ = "Copyright 2013-2014, The RADICAL Project at Rutgers"
__license__ = "MIT"
import saga
import time
import pilot
import Queue
import constants
import multiprocessing
from logger import logger
# ----------------------------------------------------------------------------
#
class _BigJobWorker(multiprocessing.Process):
# ------------------------------------------------------------------------
#
def __init__(self, resource_obj, ready_to_transfer_input_queue,
ready_to_exec_q, ready_to_transfer_output_q, done_q, failed_q):
"""DS
"""
# Multiprocessing stuff
multiprocessing.Process.__init__(self)
self.daemon = True
self._stop = False
# The resource object binds the worker to the public API & callbacks
self._res_obj = resource_obj
# BigJob handles
self._pilot_job = None
self._pilot_service = None
self._physical_tasks = []
# All queue an InputFileTransferWorker can access
self._tasks_done_q = done_q
self._tasks_failed_q = failed_q
self._tasks_ready_to_exec_q = ready_to_exec_q
self._tasks_ready_to_transfer_output_q = ready_to_transfer_output_q
self._tasks_ready_to_transfer_input_q = ready_to_transfer_input_queue
logger.info("Starting BigJobWorker using BigJob version %s" % pilot.version)
# ------------------------------------------------------------------------
#
@property
def log(self):
"""Returns the resource log.
"""
return self._res_obj['log']
# ------------------------------------------------------------------------
#
def stop(self):
"""DS
"""
self._stop = True
# ------------------------------------------------------------------------
#
def run(self):
"""DS
"""
start_time = time.time()
# First of all, the BigJobWorker needs to launch a BigJob
# instance on which it can schedule tasks that come in via
# the _tasks_ready_to_exec_q queue.
self._launch_bj()
while self._stop is False:
# Sometimes pilot jobs have the tendency not terminate
# properly. in this case, we monitor the runtime and terminate
# manually after the rumtime (+ some grace period) has expired
if time.time() - start_time >= (self._res_obj['runtime'] + 1) * 60:
self.stop()
continue
# Periodically, we check the status of our BigJob pilot object,
# translate the state and call the state change callbacks.
if self._res_obj['state'] not in [constants.DONE, constants.FAILED]:
# Obviously, we only do this if the object is not in a
# terminal state, i.e., 'Done' or 'Failed'.
self._update_bj()
# Periodically, we check the 'ready to execute queue'. If there's
# something in it, we convert it into a CU and launch it 'into'
# the BigJob pilot.
try:
task = self._tasks_ready_to_exec_q.get_nowait()
# New task ready to execute. Add to internal task list
self._physical_tasks.append({'task': task, 'cu': None})
#self._tasks_ready_to_transfer_output_q.put(task)
except Queue.Empty:
pass
# Periodically, we check the states of all running jobs, update
# and push things into the appropriate queues accordingly.
for pt in self._physical_tasks:
self._update_task_state(pt)
if pt['task'].state == constants.WAITING_FOR_EXECUTION:
pt['cu'] = self._schedule_cu(pt['task'])
elif pt['task'].state in [constants.WAITING_FOR_OUTPUT_TRANSFER]:
self._tasks_ready_to_transfer_output_q.put(pt['task'])
self._physical_tasks.remove(pt)
self._tasks_ready_to_exec_q.task_done()
elif pt['task'].state in [constants.DONE]:
# Task is done, i.e., there are no output files to
# transfer. Remove it.
self._tasks_done_q.put(pt['task'])
self._physical_tasks.remove(pt)
self._tasks_ready_to_exec_q.task_done()
elif pt['task'].state in [constants.FAILED]:
# Task has failed, so there's not much we can do except for
# removing it from the list of physical tasks
# we transfer task output even if the task has failed
#self._tasks_failed_q.put(pt['task'])
self._tasks_ready_to_transfer_output_q.put(pt['task'])
self._physical_tasks.remove(pt)
self._tasks_ready_to_exec_q.task_done()
elif pt['task'].state in [constants.PENDING]:
# Task has been started but is still pending execution.
# Not much to do at this point.
pass
# once we have left the main loop, we can cancel everything.
self._pilot_service.cancel()
# ------------------------------------------------------------------------
#
def _update_task_state(self, task):
"""DOCSTRING
"""
if task['cu'] is None:
# Task has no CU associated with it yet. Not much we can do.
return
else:
try:
new_cu_state = task['cu'].get_state().lower()
except Exception, ex:
task['task']._log.append(str(ex))
task['task']._set_state(constants.FAILED)
return
if new_cu_state in ['unknown', 'new']:
translated_state = constants.PENDING
elif new_cu_state == 'running':
translated_state = constants.RUNNING
elif new_cu_state == 'done':
if len(task['task'].output) > 0:
translated_state = constants.WAITING_FOR_OUTPUT_TRANSFER
else:
translated_state = constants.DONE
else:
error_msg = "BigJob returned CU state '%s'" % new_cu_state
task['task']._log.append(error_msg)
translated_state = constants.FAILED
task['task']._set_state(translated_state)
# ------------------------------------------------------------------------
#
def _set_state(self, new_state):
"""Starts a BigJob on the target machine.
"""
# do nothing if existing and new state are identical
if self._res_obj['state'] == new_state:
return
old_state = self._res_obj['state']
self._res_obj['state'] = new_state
for callback in self._res_obj['callbacks']:
callback(self, old_state, new_state)
# ------------------------------------------------------------------------
#
def _schedule_cu(self, task):
try:
wd = "%s/%s" % (self._res_obj['workdir'], task.dir_name)
cu_description = pilot.ComputeUnitDescription()
cu_description.executable = task.executable
cu_description.arguments = task.arguments
cu_description.environment = task.environment
cu_description.working_directory = wd
cu_description.number_of_processes = task.cores
cu_description.output = "STDOUT"
cu_description.error = "STDERR"
comp_unit = self._pilot_job.submit_compute_unit(cu_description)
task._set_state(constants.PENDING)
return comp_unit
except Exception, ex:
task._log.append(str(ex))
task._set_state(constants.FAILED)
return None
#task._set_state(constants.RUNNING)
#time.sleep(1)
#task._set_state(constants.WAITING_FOR_OUTPUT_TRANSFER)
#self._ready_to_transfer_output_queue.put(task)
# ------------------------------------------------------------------------
#
def _launch_bj(self):
"""Starts a BigJob on the target machine.
"""
try:
# Try to create the working directory. If This fails, we set
# our state to 'Failed'.
d = saga.filesystem.Directory(self._res_obj['remote_workdir_url'], saga.filesystem.CREATE_PARENTS)
d.close()
except Exception, ex:
self._res_obj['log'].append(str(ex))
self._set_state(constants.FAILED)
return
try:
# Create pilot description & launch the BigJob
pilot_description = pilot.PilotComputeDescription()
# we construct a service url as username@host
service_url = saga.Url(self._res_obj['resource']['jobmgr_url'])
if self._res_obj['username'] is not None:
service_url.set_username(self._res_obj['username'] )
service_url = str(service_url)
pilot_description.service_url = service_url
pilot_description.number_of_processes = self._res_obj['cores']
pilot_description.walltime = self._res_obj['runtime']
if self._res_obj['project_id'] is not None:
pilot_description.project = self._res_obj['project_id']
if self._res_obj['queue'] == constants.DEFAULT:
pilot_description.queue = self._res_obj['resource']['jobmgr_queue']
else:
pilot_description.queue = self._res_obj['queue']
url = saga.Url(self._res_obj['resource']['shared_fs_url'])
url.path = self._res_obj['workdir']
pilot_description.working_directory = url.path
if 'spmd_variation' in self._res_obj['resource']:
pilot_description.spmd_variation = self._res_obj['resource']['spmd_variation']
# Connect to REDIS, create Pilot Compute Service
redis_url = "redis://%s@%s" % (
self._res_obj['resource']['redis_pwd'],
self._res_obj['resource']['redis_host']
)
self._res_obj['log'].append("Connecting to REDIS server at %s" % \
self._res_obj['resource']['redis_host'])
self._pilot_service = pilot.PilotComputeService(redis_url)
# Launch Pilot Job
self._res_obj['log'].append("Launching Pilot Job: %s" % str(pilot_description))
self._pilot_job = self._pilot_service.create_pilot(pilot_description)
except Exception, ex:
# something went wrong. append the exception to the log
# and call the callbacks.
self._res_obj['log'].append(str(ex))
self._set_state(constants.FAILED)
self._set_state(constants.PENDING)
# ------------------------------------------------------------------------
#
def _update_bj(self):
try:
state = self._pilot_job.get_state().lower()
except Exception, ex:
self._res_obj['log'].append(str(ex))
self._set_state(constants.FAILED)
return
# Translate BigJob states into our own states.
if state in ['unknown', 'new']:
translated_state = constants.PENDING
elif state == 'running':
translated_state = constants.RUNNING
elif state == 'done':
translated_state = constants.DONE
else:
error_msg = "BigJob returned state '%s'" % state
self._res_obj['log'].append(error_msg)
translated_state = constants.FAILED
self._set_state(translated_state)
| {
"content_hash": "66e9dcc5313e8e5627bf9844856d6e9a",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 110,
"avg_line_length": 38.46666666666667,
"alnum_prop": 0.5125030948254519,
"repo_name": "radical-cybertools/BigJobAsync",
"id": "afa91b3303afc5376fdc1ffe52c2afc50921efbd",
"size": "12140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigjobasync/big_job_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97581"
}
],
"symlink_target": ""
} |
from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
value = (six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp))
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
| {
"content_hash": "300faf7941e4618fb086cd9723390048",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 112,
"avg_line_length": 37.04054054054054,
"alnum_prop": 0.6165632980663991,
"repo_name": "diego-d5000/MisValesMd",
"id": "ed7f3a7b219a277e9676b31387a0fec3a8fcee0f",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/django/contrib/auth/tokens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
} |
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import full_model_inference, pivot_plot
from selection.learning.core import normal_sampler, random_forest_fit_sk
from selection.learning.learners import mixture_learner
mixture_learner.scales = [1]*10 + [1.5,2,3,4,5,10]
def BHfilter(pval, q=0.2):
pval = np.asarray(pval)
pval_sort = np.sort(pval)
comparison = q * np.arange(1, pval.shape[0] + 1.) / pval.shape[0]
passing = pval_sort < comparison
if passing.sum():
thresh = comparison[np.nonzero(passing)[0].max()]
return np.nonzero(pval <= thresh)[0]
return []
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=1000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, dispersion, lam, sampler):
global counter
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
soln = XTXi.dot(noisy_S)
solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
pval = ndist.cdf(solnZ)
pval = 2 * np.minimum(pval, 1 - pval)
return set(BHfilter(pval, q=0.2))
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion, lam)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
success_params=(1, 1),
B=B,
fit_probability=random_forest_fit_sk,
fit_args={'n_estimators':5000})
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(500):
df = simulate(B=40000)
csvfile = 'random_forest_targets_BH.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
| {
"content_hash": "b39085cab454dac3676bfff433295413",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 87,
"avg_line_length": 33.265306122448976,
"alnum_prop": 0.5128834355828221,
"repo_name": "selective-inference/selective-inference",
"id": "c7832c5eac251af53e632a16e3f763216abe5e16",
"size": "3260",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/learning_examples/BH/random_forest_targets_BH.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "572490"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
} |
import pdb
import datetime
import operator
import json
import logging
import orange
import Orange
import orngTree
import orngStat
import orngTest
import orngDisc
import Orange.feature as orf
import numpy as np
from datetime import datetime, date, timedelta
from datetime import time as dttime
from collections import deque, defaultdict
from dateutil.parser import parse as dateparse
from scorpionsql.db import *
from scorpionsql.sql import *
from scorpionsql.sqlparser import *
from scorpionsql.aggerror import *
from scorpion.arch import *
from scorpion.util import *
from scorpion.settings import *
class SharedObj(object):
def __init__(
self, db,
errors=[],
goodkeys={},
ignore_attrs=[],
schema=[],
dbname=None,
parsed=None,
params=[],
**kwargs):
if not db and not dbname:
raise "SharedObj requires a database connection!"
self.db = db or connect(dbname)
dbname = dbname or str(self.db.url).split("/")[-1]
self.monetdb = connect(dbname, engine='monet')
self.dbname = dbname
self.parsed = parsed
self.params = params # parameters for parsed SQL object
self.errors = errors
self.goodkeys = goodkeys or {}
self.schema = schema or db_schema(db, self.parsed.tables[0])
# aggregate type -> {groupby key -> ids of "bad" tuples}
self.ignore_attrs = ignore_attrs
self.merged_tables = {}
self.rules = {}
self.top_k_rules = {}
self.clauses = {}
self.c = kwargs.get('c', 0.3)
# created by server to track status of scorpion
# processing
# should be set when creating SharedObj
self.status = None
if not self.parsed:
raise Error("expected a parsed SQL object!")
if len(self.parsed.fr) > 1:
# XXX: only support single table queries
raise "Don't support joins yet!"
def clone(self):
return SharedObj(
self.db,
parsed=self.parsed,
dbname=self.dbname,
errors=self.errors,
goodkeys=self.goodkeys,
ignore_attrs=self.ignore_attrs,
schema=self.schema,
params=self.params
)
def get_tuples(self, keys, attrs=None):
try:
if keys is None or not len(list(keys)):
return []
except:
pass
attrs = attrs or self.rules_schema
return [list(row) for row in self.get_filter_rows(keys=keys, attrs=attrs)]
def get_filter_rows(self, keys=None, attrs=None, where=None):
"""
Need to deal with keys and such outside of function
"""
qobj = self.parsed.get_filter_qobj(keys=keys)
if attrs:
qobj.select = Select(attrs)
if where:
qobj.where.append(where)
params = list(self.params)
if keys:
params.append(tuple(list(keys)))
return query(self.db, str(qobj), [params])
def get_rules_schema(self):
"""
"""
invalid_types = [date, datetime, dttime]
used_attrs = set()
for selexpr in self.parsed.select:
used_attrs.update(selexpr.cols)
schema = dict(filter(lambda p: p[1] not in invalid_types, self.schema.iteritems()))
ret = set(schema.keys()).difference(used_attrs)
ret.add('id')
return ret
def update_status(self, s):
if self.status:
self.status.update_status(s)
def update_rules(self, label, rules):
if self.status:
self.status.update_rules(label, rules)
attrnames = property(lambda self: self.schema.keys())
rules_schema = property(get_rules_schema)
sql = property(lambda self: str(self.parsed))
prettify_sql = property(lambda self: self.parsed.prettify())
filter = property(lambda self: self.parsed.get_filter_qobj())
def create_sharedobj(dbname, sql, badresults, goodresults, errtype):
from arch import get_provenance
db = connect(dbname)
parsed = parse_sql(sql)
obj = SharedObj(db, parsed=parsed, dbname=dbname)
qobj = obj.parsed
nonagg = qobj.select.nonaggs[0]
try:
xcol = nonagg.cols[0]
col_type = db_type(db, qobj.fr, xcol)
except:
col_type = None
# assumes every aggregate has the same bad keys
badresults = extract_agg_vals(badresults, col_type)
goodresults = extract_agg_vals(goodresults)
errors = []
for agg in qobj.select.aggregates:
aggerr = AggErr(agg, badresults, 20, errtype, {'erreq' : None})
errors.append(aggerr)
label = agg.shortname
obj.goodkeys[label] = goodresults
obj.errors = errors
table = get_provenance(obj, obj.errors[0].agg.cols, obj.errors[0].keys)
return obj, table
def extract_agg_vals(vals, col_type=None):
fmts = [
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M',
'%Y-%m-%dT%H'
]
for fmt in fmts:
try:
ret = [datetime.strptime(val, fmt) for val in vals]
print vals
if col_type == 'date':
ret = [d.date() for d in ret]
elif 'Z' in fmt:
#ret = [d - timedelta(hours=5) for d in ret] # compensate for 'Z' +4 timezone
pass
return ret
except Exception as e:
pass
try:
ret = [datetime.strptime(val, '%Y-%m-%d').date() for val in vals]
return ret
except Exception as ee:
print ee
return vals
| {
"content_hash": "075e711cdf01749e797e529085389190",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 87,
"avg_line_length": 24.67142857142857,
"alnum_prop": 0.6404169079328315,
"repo_name": "sirrice/scorpion",
"id": "a2ed1a03435b835fcc9eb787a9aefa0eb1a6ad91",
"size": "5181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scorpion/sharedobj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389180"
},
{
"name": "R",
"bytes": "3041"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
""" Python test discovery, setup and run of test functions. """
import fnmatch
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
import _pytest
cutdir = py.path.local(_pytest.__file__).dirpath()
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# this misspelling is common - raise a specific error to alert the user
if hasattr(metafunc.function, 'parameterize'):
msg = "{0} has 'parameterize', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.mark.trylast
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.mark.trylast
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.mark.hookwrapper
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.classnamefilter(name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.funcnamefilter(name) and hasattr(obj, "__call__") and\
getfixturemarker(obj) is None:
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
try:
methods = [module.pytest_generate_tests]
except AttributeError:
methods = []
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
self.ihook.pytest_generate_tests.callextra(methods, metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
mark(funcobj)
else:
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath=True)
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, its probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(self.obj)
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=cutdir)
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
# explict naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtype, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect=True to do it rather at test setup time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: if True each argvalue corresponding to an argname will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each is corresponding to the argvalues so that they are
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
if not indirect:
#XXX should we also check for the opposite case?
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(
self.function, arg))
valtype = indirect and "params" or "funcargs"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
request.addcall() is called during the test collection phase prior and
independently to actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
session.perform_collect()
curdir = py.path.local()
if session.items:
nodeid = session.items[0].nodeid
else:
part = session._initialparts[0]
nodeid = "::".join(map(str, [curdir.bestrelpath(part[0])] + part[1:]))
nodeid.replace(session.fspath.sep, "/")
tw = py.io.TerminalWriter()
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname in fm._arg2fixturedefs:
fixturedefs = fm.getfixturedefs(argname, nodeid)
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(ExpectedException, *args, **kwargs):
""" assert that a code block/function call raises @ExpectedException
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if ExpectedException is AssertionError:
# we want to catch a AssertionError
# replace our subclass with the builtin one
# see https://bitbucket.org/pytest-dev/pytest/issue/176/pytestraises
from _pytest.assertion.util import BuiltinAssertionError \
as ExpectedException
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(ExpectedException, tuple):
for exc in ExpectedException:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif not inspect.isclass(ExpectedException):
raise TypeError(msg % type(ExpectedException))
if not args:
return RaisesContext(ExpectedException)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
# XXX didn'T mean f_globals == f_locals something special?
# this is destroyed here ...
except ExpectedException:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except ExpectedException:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, ExpectedException):
self.ExpectedException = ExpectedException
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.ExpectedException)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
# check if parametrization happend with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "cls", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a a dynamic call to
# getfuncargvalue(argname) usage which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
"""add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating it as an input argument in the fixture
function. If you only can decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if its not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
stack = stack[:-1] # the last fixture raise an error, let's present
# it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(function)
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
i. e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._seenplugins = set()
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
### XXX this hook should be called for historic events like pytest_configure
### so that we don't have to do the below pytest_configure hook
def pytest_plugin_registered(self, plugin):
if plugin in self._seenplugins:
return
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
self._seenplugins.add(plugin)
@pytest.mark.tryfirst
def pytest_configure(self, config):
plugins = config.pluginmanager.getplugins()
for plugin in plugins:
self.pytest_plugin_registered(plugin)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
# collect the closure of all fixtures , starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return a arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params and argname not in func_params[0]:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
if self.unittest:
result = self.func(request.instance, **kwargs)
else:
fixturefunc = self.func
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_nomatch, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indictes.items() is random order of argnames but
# then again different functions (items) can change order of
# arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
| {
"content_hash": "e9877f12623b6b78119f99c23b474922",
"timestamp": "",
"source": "github",
"line_count": 2071,
"max_line_length": 108,
"avg_line_length": 39.022694350555284,
"alnum_prop": 0.6011309641655118,
"repo_name": "gauribhoite/personfinder",
"id": "2fa4af3732f2bf3cc3e9a49070ea2b4bd9926c01",
"size": "80816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/site-packages/pytest-2.7.2-py2.7.egg/_pytest/python.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "413819"
},
{
"name": "CSS",
"bytes": "330448"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "720955"
},
{
"name": "JavaScript",
"bytes": "1072023"
},
{
"name": "Makefile",
"bytes": "16086"
},
{
"name": "PHP",
"bytes": "2582470"
},
{
"name": "Python",
"bytes": "60243792"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "TeX",
"bytes": "60219"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from distutils.spawn import find_executable
import os
from setuptools import setup, find_packages
from dockerfabric import __version__
def include_readme():
try:
import pandoc
except ImportError:
return ''
pandoc.core.PANDOC_PATH = find_executable('pandoc')
readme_file = os.path.join(os.path.dirname(__file__), 'README.md')
doc = pandoc.Document()
with open(readme_file, 'r') as rf:
doc.markdown = rf.read()
return doc.rst
setup(
name='docker-fabric',
version=__version__,
packages=find_packages(),
install_requires=['six', 'Fabric>=1.8.0', 'docker-py>=1.9.0', 'docker-map>=0.8.0b2'],
extras_require={
'yaml': ['PyYAML'],
},
license='MIT',
author='Matthias Erll',
author_email='[email protected]',
url='https://github.com/merll/docker-fabric',
description='Build Docker images, and run Docker containers in Fabric.',
long_description=include_readme(),
platforms=['OS Independent'],
keywords=['docker', 'fabric'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: System :: Software Distribution',
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
],
include_package_data=True,
)
| {
"content_hash": "90bd0b2fa3e3f18eac1a4fd083d43a8f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 89,
"avg_line_length": 31.8,
"alnum_prop": 0.6226415094339622,
"repo_name": "merll/docker-fabric",
"id": "c783d245ec175a0c3d89da54bb93a70a9308f737",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86731"
}
],
"symlink_target": ""
} |
import functools
import timeit
from typing import Optional
import jax
from jax import lax
import jax.numpy as jnp
from jax._src.lax.control_flow.for_loop import for_loop
import numpy as np
import jax_triton as jt
from jax_triton import pallas as pl
def layer_norm_forward_kernel(
x_ref, weight_ref, bias_ref, # Input arrays
o_ref, mean_ref=None, rstd_ref=None, # Output arrays
*, eps: float, block_size: int):
row_idx = pl.program_id(axis=0)
def mean_body(i, acc_ref):
col_idx = i * block_size + jnp.arange(block_size)
mask = col_idx < x_ref.shape[1]
a = pl.load(x_ref, (row_idx, col_idx), mask=mask, other=0.,
eviction_policy="evict_last").astype(jnp.float32)
acc_ref[:] += a
mean = for_loop(jt.cdiv(x_ref.shape[1], block_size), mean_body,
jnp.zeros(block_size)).mean(axis=0)
def var_body(i, acc_ref):
col_idx = i * block_size + jnp.arange(block_size)
mask = col_idx < x_ref.shape[1]
a = pl.load(x_ref, (row_idx, col_idx), mask=mask, other=0.,
eviction_policy="evict_last").astype(jnp.float32)
a = jnp.where(mask, a - mean, 0.)
acc_ref[:] += a * a
var = for_loop(jt.cdiv(x_ref.shape[1], block_size), var_body,
jnp.zeros(block_size)).mean()
rstd = 1 / jnp.sqrt(var + eps)
if mean_ref is not None:
mean_ref[row_idx] = mean
if rstd_ref is not None:
rstd_ref[row_idx] = rstd
def body(i, _):
col_idx = i * block_size + jnp.arange(block_size)
mask = col_idx < x_ref.shape[1]
weight = pl.load(weight_ref, (col_idx,), mask=mask)
bias = pl.load(bias_ref, (col_idx,), mask=mask)
x = pl.load(x_ref, (row_idx, col_idx), mask=mask, other=0.,
eviction_policy="evict_first").astype(jnp.float32)
out = (x - mean) * rstd * weight + bias
pl.store(o_ref, (row_idx, col_idx), out.astype(o_ref.dtype), mask=mask)
for_loop(jt.cdiv(x_ref.shape[1], block_size), body, ())
@functools.partial(jax.jit, static_argnames=["num_warps", "num_stages",
"num_stages", "eps", "interpret"])
def layer_norm(
x, weight, bias,
num_warps: Optional[int] = None,
num_stages: Optional[int] = 3,
eps: float = 1e-5,
interpret: bool = False):
m, n = x.shape
# Triton heuristics
# Less than 64KB per feature: enqueue fused kernel
max_fused_size = 65536 // x.dtype.itemsize
block_size = min(max_fused_size, jt.next_power_of_2(n))
block_size = min(max(block_size, 128), 4096)
num_warps = min(max(block_size // 256, 1), 8)
grid = m # one thread per row
kernel = functools.partial(layer_norm_forward_kernel, eps=eps,
block_size=block_size)
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
return pl.pallas_call(kernel, num_warps=num_warps, num_stages=num_stages,
grid=grid, out_shape=out_shape, debug=False,
interpret=interpret)(x, weight, bias)
@functools.partial(jax.jit, static_argnames=["eps"])
def layer_norm_reference(x, weight, bias, *, eps: float = 1e-5):
mean = jnp.mean(x, axis=1)
mean2 = jnp.mean(jnp.square(x), axis=1)
var = jnp.maximum(0., mean2 - jnp.square(mean))
y = x - mean[:, None]
mul = lax.rsqrt(var + eps)
return y * mul[:, None] * weight[None] + bias[None]
if __name__ == "__main__":
dtype = jnp.float32
m, n = 4096, 4096
weight_key, bias_key, x_key = jax.random.split(jax.random.PRNGKey(0), 3)
weight = jax.random.normal(weight_key, (n,), dtype=dtype)
bias = jax.random.normal(bias_key, (n,), dtype=dtype)
x = jax.random.normal(x_key, (m, n), dtype=dtype)
out = layer_norm(x, weight, bias)
out_ref = layer_norm_reference(x, weight, bias)
np.testing.assert_allclose(out, out_ref, atol=0.03, rtol=0.03)
n_trials = 1000
duration = timeit.timeit(lambda: layer_norm(x, weight, bias).block_until_ready(),
number=n_trials)
print(f"Fused Layer Norm: {duration / n_trials * 1000:.2f}ms")
duration = timeit.timeit(lambda: layer_norm_reference(x, weight, bias).block_until_ready(),
number=n_trials)
print(f"Reference Layer Norm: {duration / n_trials * 1000:.2f}ms")
| {
"content_hash": "68a084fc324be1c095bc90137cf66a2a",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 93,
"avg_line_length": 37.5625,
"alnum_prop": 0.6173044925124792,
"repo_name": "jax-ml/jax-triton",
"id": "c4da2cfd5a98549f8b51f4f3dbe6eb57706727f1",
"size": "4795",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/pallas/layer_norm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6853"
},
{
"name": "Python",
"bytes": "152495"
}
],
"symlink_target": ""
} |
import re
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.stats import pearsonr
from scattertext.representations import Word2VecFromParsedCorpus
class EmbeddingsResolver:
def __init__(self, corpus):
self.corpus_ = corpus
self.embeddings_ = None
self.word2vec_model_ = None
def set_embeddings(self, embeddings):
'''
Specifies fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
assert embeddings.shape[1] == self.corpus_.get_num_terms()
self.embeddings_ = embeddings.T
self.vocab_ = self.corpus_.get_terms()
return self
def set_embeddings_model(self, model=None, term_acceptance_re=re.compile('[a-z]{3,}')):
'''
:param model: gensim word2vec.Word2Vec model
:param term_acceptance_re : SRE_Pattern, Regular expression to identify
valid terms, default re.compile('[a-z]{3,}')
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
self.word2vec_model_ = model
if term_acceptance_re is not None:
acceptable_terms = set([t for t in self.corpus_.get_terms() if term_acceptance_re.match(t)])
else:
acceptable_terms = set(self.corpus_.get_terms())
model = Word2VecFromParsedCorpus(self.corpus_, model).train()
self.corpus_ = self.corpus_.remove_terms(set(self.corpus_.get_terms()) - acceptable_terms)
weight_list = [model[word] for word in model.wv.key_to_index.keys()]
self.embeddings_ = np.stack(weight_list)
self.vocab_ = list(model.wv.key_to_index.keys())
return self
def project_embeddings(self, projection_model=None, x_dim=0, y_dim=1):
'''
:param projection_model: sklearn unsupervised model (e.g., PCA) by default the recommended model is umap.UMAP,
which requires UMAP in to be installed
:param x_dim: int, default 0, dimension of transformation matrix for x-axis
:param y_dim: int, default 1, dimension of transformation matrix for y-axis
:return:
'''
axes = self.project(projection_model)
word_axes = (pd.DataFrame({'term': [w for w in self.vocab_],
'x': axes.T[x_dim],
'y': axes.T[y_dim]})
.set_index('term')
.reindex(pd.Series(self.corpus_.get_terms()))
.dropna())
self.corpus_ = self.corpus_.remove_terms(set(self.corpus_.get_terms()) - set(word_axes.index))
word_axes = word_axes.reindex(self.corpus_.get_terms()).dropna()
return self.corpus_, word_axes
'''
def get_svd(self, num_dims, category):
U, s, V = sparse.linalg.svds(self.corpus_._X.astype('d'), k=num_dims)
Y = self.corpus_.get_category_ids() == category
[pearsonr(U.T[i], ) for i in range(num_dims)]
'''
def project(self, projection_model=None):
'''
:param projection_model: sklearn unsupervised model (e.g., PCA) by default the recommended model is umap.UMAP,
which requires UMAP in to be installed
:return: array, shape (num dimension, vocab size)
'''
if self.embeddings_ is None:
raise Exception("Run set_embeddings_model or set_embeddings to get embeddings")
if projection_model is None:
try:
import umap
except:
raise Exception("Please install umap (pip install umap-learn) to use the default projection_model.")
projection_model = umap.UMAP(min_dist=0.5, metric='cosine')
axes = projection_model.fit_transform(self.embeddings_)
return axes
| {
"content_hash": "8d56cfd66eb506a06b2f852c4e0d2900",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 118,
"avg_line_length": 42.916666666666664,
"alnum_prop": 0.6135922330097088,
"repo_name": "JasonKessler/scattertext",
"id": "fc1cd8b6560fad955bdddff3272bbdeb21d7883c",
"size": "4120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scattertext/representations/EmbeddingsResolver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "52028"
},
{
"name": "JavaScript",
"bytes": "497904"
},
{
"name": "Python",
"bytes": "1183530"
},
{
"name": "Shell",
"bytes": "306"
}
],
"symlink_target": ""
} |
from m5.objects.ArmCPU import ArmMinorCPU
MinorCPU = ArmMinorCPU
| {
"content_hash": "24ad282cd8f597bf6be0344c2e174771",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 22,
"alnum_prop": 0.8333333333333334,
"repo_name": "gem5/gem5",
"id": "bac019774e758b20b8810aa6212f15555bd9edd7",
"size": "1558",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "src/arch/arm/MinorCPU.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import six
import numpy as np
import math
import argparse
import paddle
import paddle.fluid as fluid
import time
import utils
import net
SEED = 102
def parse_args():
parser = argparse.ArgumentParser("TagSpace benchmark.")
parser.add_argument(
'--neg_size', type=int, default=3, help='neg/pos ratio')
parser.add_argument(
'--train_dir', type=str, default='train_data', help='train file address')
parser.add_argument(
'--vocab_text_path', type=str, default='vocab_text.txt', help='vocab_text file address')
parser.add_argument(
'--vocab_tag_path', type=str, default='vocab_tag.txt', help='vocab_text file address')
parser.add_argument(
'--is_local', type=int, default=1, help='whether local')
parser.add_argument(
'--model_dir', type=str, default='model_', help='model dir')
parser.add_argument(
'--batch_size', type=int, default=5, help='num of batch size')
parser.add_argument(
'--print_batch', type=int, default=10, help='num of print batch')
parser.add_argument(
'--pass_num', type=int, default=10, help='num of epoch')
parser.add_argument(
'--use_cuda', type=int, default=0, help='whether use gpu')
parser.add_argument(
'--base_lr', type=float, default=0.01, help='learning rate')
parser.add_argument(
'--num_devices', type=int, default=1, help='Number of GPU devices')
parser.add_argument(
'--role', type=str, default='pserver', help='trainer or pserver')
parser.add_argument(
'--endpoints', type=str, default='127.0.0.1:6000', help='The pserver endpoints, like: 127.0.0.1:6000, 127.0.0.1:6001')
parser.add_argument(
'--current_endpoint', type=str, default='127.0.0.1:6000', help='The current_endpoint')
parser.add_argument(
'--trainer_id', type=int, default=0, help='trainer id ,only trainer_id=0 save model')
parser.add_argument(
'--trainers', type=int, default=1, help='The num of trianers, (default: 1)')
args = parser.parse_args()
return args
def get_cards(args):
return args.num_devices
def train():
""" do training """
args = parse_args()
train_dir = args.train_dir
vocab_text_path = args.vocab_text_path
vocab_tag_path = args.vocab_tag_path
use_cuda = True if args.use_cuda else False
batch_size = args.batch_size
neg_size = args.neg_size
vocab_text_size, vocab_tag_size, train_reader = utils.prepare_data(
file_dir=train_dir, vocab_text_path=vocab_text_path,
vocab_tag_path=vocab_tag_path, neg_size=neg_size,
batch_size=batch_size * get_cards(args),
buffer_size=batch_size*100, is_train=True)
""" train network """
# Train program
avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size)
# Optimization to minimize lost
sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr)
sgd_optimizer.minimize(avg_cost)
def train_loop(main_program):
# Initialize executor
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
pass_num = args.pass_num
model_dir = args.model_dir
fetch_list = [avg_cost.name]
exe.run(fluid.default_startup_program())
total_time = 0.0
for pass_idx in range(pass_num):
epoch_idx = pass_idx + 1
print("epoch_%d start" % epoch_idx)
t0 = time.time()
for batch_id, data in enumerate(train_reader()):
lod_text_seq = utils.to_lodtensor([dat[0] for dat in data], place)
lod_pos_tag = utils.to_lodtensor([dat[1] for dat in data], place)
lod_neg_tag = utils.to_lodtensor([dat[2] for dat in data], place)
loss_val, correct_val = exe.run(
feed={
"text": lod_text_seq,
"pos_tag": lod_pos_tag,
"neg_tag": lod_neg_tag},
fetch_list=[avg_cost.name, correct.name])
if batch_id % args.print_batch == 0:
print("TRAIN --> pass: {} batch_num: {} avg_cost: {}, acc: {}"
.format(pass_idx, (batch_id+10) * batch_size, np.mean(loss_val),
float(np.sum(correct_val)) / batch_size))
t1 = time.time()
total_time += t1 - t0
print("epoch:%d num_steps:%d time_cost(s):%f" %
(epoch_idx, batch_id, total_time / epoch_idx))
save_dir = "%s/epoch_%d" % (model_dir, epoch_idx)
feed_var_names = ["text", "pos_tag"]
fetch_vars = [cos_pos]
fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars, exe)
print("finish training")
if args.is_local:
print("run local training")
train_loop(fluid.default_main_program())
else:
print("run distribute training")
t = fluid.DistributeTranspiler()
t.transpile(args.trainer_id, pservers=args.endpoints, trainers=args.trainers)
if args.role == "pserver":
print("run psever")
pserver_prog = t.get_pserver_program(args.current_endpoint)
pserver_startup = t.get_startup_program(args.current_endpoint,
pserver_prog)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(pserver_startup)
exe.run(pserver_prog)
elif args.role == "trainer":
print("run trainer")
train_loop(t.get_trainer_program())
if __name__ == "__main__":
train()
| {
"content_hash": "0ec13670acabd5feb367e0d93e359ab0",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 126,
"avg_line_length": 41.90510948905109,
"alnum_prop": 0.5840445915345759,
"repo_name": "kuke/models",
"id": "96cdf615ed9e6673159db5163f33f431d5c6e9bc",
"size": "5741",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleRec/tagspace/cluster_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
import torch
from torch.autograd import Variable
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Variables for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_fn = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-4
for t in range(500):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Variable of input data to the Module and it produces
# a Variable of output data.
y_pred = model(x)
# Compute and print loss. We pass Variables containing the predicted and true
# values of y, and the loss function returns a Variable containing the
# loss.
loss = loss_fn(y_pred, y)
print(t, loss.data[0])
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Variables with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Variable, so
# we can access its data and gradients like we did before.
for param in model.parameters():
param.data -= learning_rate * param.grad.data
| {
"content_hash": "e97d9e32e69f6c3293dca01084339f61",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 41.73076923076923,
"alnum_prop": 0.7211981566820277,
"repo_name": "floydhub/dockerfiles",
"id": "2ebd4aaab063069e659f1ed8cbe11d96bf13cc80",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dl/pytorch/tests/nn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1824"
},
{
"name": "HTML",
"bytes": "26359"
},
{
"name": "Lua",
"bytes": "17018"
},
{
"name": "Python",
"bytes": "200422"
},
{
"name": "Shell",
"bytes": "22576"
}
],
"symlink_target": ""
} |
from deep_qa.data.tokenizers.word_tokenizer import WordTokenizer
class TestTokenizer:
tokenizer = WordTokenizer({})
passage = "On January 7, 2012, Beyoncé gave birth to her first child, a daughter, Blue Ivy " +\
"Carter, at Lenox Hill Hospital in New York. Five months later, she performed for four " +\
"nights at Revel Atlantic City's Ovation Hall to celebrate the resort's opening, her " +\
"first performances since giving birth to Blue Ivy."
def test_char_span_to_token_span_handles_easy_cases(self):
# "January 7, 2012"
token_span = self.tokenizer.char_span_to_token_span(self.passage, (3, 18))
assert token_span == (1, 5)
# "Lenox Hill Hospital"
token_span = self.tokenizer.char_span_to_token_span(self.passage, (91, 110))
assert token_span == (22, 25)
# "Lenox Hill Hospital in New York."
token_span = self.tokenizer.char_span_to_token_span(self.passage, (91, 123))
assert token_span == (22, 29)
| {
"content_hash": "93ea933a70c32850acf899048932850a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 99,
"avg_line_length": 50.75,
"alnum_prop": 0.6571428571428571,
"repo_name": "RTHMaK/RPGOne",
"id": "4d2dbefde187c7e79936018a628a6ba6100d7ecc",
"size": "1060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_qa-master/tests/data/tokenizers/tokenizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='gimble',
author='Mike Douglas',
author_email='[email protected]',
version='1.0.1',
description='XMPP Chat Bot',
long_description='',
entry_points = {
'console_scripts': [
'gimble = gimble.robot:main',
],
},
packages=find_packages(),
install_requires=[
'SleekXMPP',
'requests',
'BeautifulSoup4',
]) | {
"content_hash": "fbbea8dfa4c01b94ae3328255c936ef0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 43,
"avg_line_length": 18,
"alnum_prop": 0.5511111111111111,
"repo_name": "mike-douglas/gimble",
"id": "bce31136856f9cdc5d5953bb7cac16ac686001da",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14088"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_minus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "c15813895953a91afa9515bff47d7c04",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7074235807860262,
"repo_name": "jacobb/dj_minus_dj",
"id": "50977e8721725e4569f128f5ca546be03cd2333e",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj_minus/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8639"
}
],
"symlink_target": ""
} |
import json
import os
import time
def getSettings(mainfile_dir):
try:
with open(mainfile_dir + '/settings.cbs','r') as f:
return json.load(f)
except FileNotFoundError:
restoreDefaultSettings(mainfile_dir)
with open(mainfile_dir + '/settings.cbs', 'r') as f:
return json.load(f)
except Exception as e:
print("Error getting settings: " + str(e))
def getDefaultSettings():
defaults = '{\
"codebooks":{},\
"open_codebooks":[],\
"focused_codebook":-1,\
"open_entry":-1\
}'
return json.loads(defaults)
def save_settings(mainfile_dir, settingsDict):
with open(mainfile_dir + '/settings.cbs','w') as settingsfile:
settingsfile.write(json.dumps(settingsDict, indent=" "))
def restoreDefaultSettings(mainfile_dir):
save_settings(mainfile_dir, getDefaultSettings())
pass
def getDefaultCodebook(cb_name):
# entries will be of style {'entry_dir':'tags', ... }
codebook_name = cb_name
codebook_entries = {}
cb = {"codebook_name":codebook_name, "creation_date":time.strftime('%a %B %d, %Y')}
return json.dumps(cb, indent = " ")
| {
"content_hash": "f84602631df219427010a01e0bcb0b1b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 87,
"avg_line_length": 28.227272727272727,
"alnum_prop": 0.5909822866344605,
"repo_name": "gracefulcamel/Codebooks",
"id": "e5f3f7234194f5a3dd2be5f3132488e8a2d0850e",
"size": "1242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codebook_fileops/settingsops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58090"
}
],
"symlink_target": ""
} |
"""
Convert Kaggle format data into 1 file by pair of variables
"""
import os
import numpy as np
import pandas as pd
import csv
def format_col(df):
df.columns=['A','B']
A=""
B=""
for idx,row in df.iterrows():
A+=' '+str(float(row['A']))
B+=' ' + str(float(row['B']))
return A,B
def file_to_kag(pthtofile):
SplID=[]
type_info=[]
i=1
with open(pthtofile+'Dfinal_Pairs.csv','wb') as output:
writer=csv.writer(output)
writer.writerow(['SampleID','A','B'])
while os.path.exists(pthtofile+str(i)+'.txt'):
print(pthtofile+str(i)+'.txt')
df=pd.read_csv(pthtofile+str(i)+'.txt',sep=' | | | ')
A,B=format_col(df)
SampleID= os.path.basename(pthtofile)+'D'+str(i)
SplID.append(SampleID)
type_info.append('Numerical')
writer.writerow([SampleID,A,B])
if i==26091:
break
i+=1
print(len(SplID))
print(len(type_info))
info=pd.DataFrame([SplID,type_info,type_info])
info=info.transpose()
info.columns=['SampleID','A type','B type']
#info['SampleID']=pd.Series(SplID,index=info.index)
#info['Type-A']=pd.Series(type_info,index=info.index)
#info['Type-B']=pd.Series(type_info,index=info.index)
info.to_csv(pthtofile+'Dfinal_publicinfo.csv',index=False)
if __name__ == '__main__':
inputfile_type='/users/ao/diviyan/NoSave/DatasetD/pair'
file_to_kag(inputfile_type) | {
"content_hash": "408fe6d7d23be92f312bc5508226059f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 71,
"avg_line_length": 29.392156862745097,
"alnum_prop": 0.5783855903935957,
"repo_name": "Diviyan-Kalainathan/causal-humans",
"id": "309434de63d93e7bae3322560fc136297f9698f5",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cause-effect/util/file_to_kagf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "851380"
},
{
"name": "Shell",
"bytes": "988"
},
{
"name": "TeX",
"bytes": "288420"
}
],
"symlink_target": ""
} |
"""pytest for :mod:`pykern.pkcompat`
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import locale
import os
import pytest
import six
def setup_module():
"""Set locale so can test expected outputs.
TODO(robnagler) should test multiple locales.
"""
# Setting locale as a tuple doesn't work. Not clear this is cross-platform
os.environ["LANG"] = "en_US.UTF-8"
locale.setlocale(locale.LC_ALL)
def test_from_bytes():
from pykern import pkcompat
from pykern.pkunit import pkeq
b = pkcompat.to_bytes("你好")
s = pkcompat.from_bytes(b)
pkeq(s, "你好")
pkeq(b, b"\xe4\xbd\xa0\xe5\xa5\xbd")
if six.PY2:
pkeq(b, s)
else:
pkeq(False, b == s)
def test_locale_str_1():
"""Verify proper conversions"""
from pykern import pkcompat
s = pkcompat.locale_str(b"\xc2\xb0")
if six.PY2:
assert isinstance(
s, unicode
), "When locale_str is converted in PY2, it should return unicode"
else:
assert isinstance(
s, str
), "When locale_str is converted in not PY2, it should return str"
assert "°" == s, "Conversion should be same as literal unicode value"
if six.PY2:
before = unicode(b"\xc2\xb0", "utf8")
assert before == pkcompat.locale_str(
before
), "When string is already unicode, conversion yields same string"
before = str(123)
assert unicode(before) == pkcompat.locale_str(
before
), "When string is already unicode, conversion yields same string"
before = str(None)
assert unicode(before) == pkcompat.locale_str(
before
), "When string is already unicode, conversion yields same string"
else:
before = str(123)
assert before == pkcompat.locale_str(
before
), "When string is already unicode, conversion yields same string"
before = str(None)
assert before == pkcompat.locale_str(
before
), "When string is already unicode, conversion yields same string"
def test_locale_str_2():
"""Invalid utf8"""
from pykern import pkcompat
with pytest.raises(UnicodeDecodeError):
# TODO(robngler) set the locale?
pkcompat.locale_str(b"\x80")
def test_unicode_unescape():
from pykern import pkcompat
assert "\n" == pkcompat.unicode_unescape(r"\n")
| {
"content_hash": "7b7fed73d3197f58a7e5318bca518916",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 28.775280898876403,
"alnum_prop": 0.62827020695041,
"repo_name": "radiasoft/pykern",
"id": "211a84053f07dd778fa4c929495d3b9c6b1ad9d9",
"size": "2594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pkcompat_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "101531"
},
{
"name": "Python",
"bytes": "368016"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
} |
from datetime import date, timedelta
import re
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from casexml.apps.case.models import CommCareCase
from django.utils.translation import ugettext as _
import logging
from corehq.util.dates import iso_string_to_datetime
from custom.bihar.calculations.utils.xmlns import BP, NEW, MTB_ABORT, DELIVERY, REGISTRATION, PNC
from couchdbkit.exceptions import ResourceNotFound
from corehq.apps.users.models import CommCareUser, CouchUser
EMPTY_FIELD = "---"
def get_property(dict_obj, name, default=None):
if name in dict_obj:
if type(dict_obj[name]) is dict:
return dict_obj[name]["#value"]
return dict_obj[name]
else:
return default if default is not None else EMPTY_FIELD
class MCHDisplay(CaseDisplay):
def __init__(self, report, case):
try:
self.user = CommCareUser.get_by_user_id(case["user_id"])
if self.user is None:
self.user = CommCareUser.get_by_user_id(case["opened_by"])
except CouchUser.AccountTypeError:
# if we have web users submitting forms (e.g. via cloudcare) just don't bother
# with the rest of this data.
self.user = None
if self.user:
setattr(self, "_village", get_property(self.user.user_data, "village"))
setattr(self, "_asha_name", self.user.full_name if get_property(self.user.user_data, "role").upper() == "ASHA" else get_property(self.user.user_data, "partner_name"))
if get_property(self.user.user_data, "role").upper() == "ASHA":
setattr(self, "_asha_number", self.user.default_phone_number if self.user.default_phone_number else EMPTY_FIELD)
else:
setattr(self, "_asha_number", get_property(self.user.user_data, "partner_phone"))
setattr(self, "_awc_code_name", "%s, %s" % (get_property(self.user.user_data, "awc-code"), get_property(self.user.user_data, "village")))
setattr(self, "_aww_name", self.user.full_name if get_property(self.user.user_data, "role").upper() == "AWW" else get_property(self.user.user_data, "partner_name"))
if get_property(self.user.user_data, "role").upper() == "AWW":
setattr(self, "_aww_number", self.user.phone_numbers[0] if len(self.user.phone_numbers) > 0 else EMPTY_FIELD)
else:
setattr(self, "_aww_number", get_property(self.user.user_data, "partner_phone"))
super(MCHDisplay, self).__init__(report, case)
@property
def village(self):
return getattr(self, "_village", EMPTY_FIELD)
@property
def asha_name(self):
return getattr(self, "_asha_name", EMPTY_FIELD)
@property
def asha_number(self):
return getattr(self, "_asha_number", EMPTY_FIELD)
@property
def awc_code_name(self):
return getattr(self, "_awc_code_name", EMPTY_FIELD)
@property
def aww_name(self):
return getattr(self, "_aww_name", EMPTY_FIELD)
@property
def aww_number(self):
return getattr(self, "_aww_number", EMPTY_FIELD)
@property
def chw_name(self):
if self.user:
return "%s, \"%s\"" % (self.user.username, self.user.full_name)
else:
return _("Unknown user")
@property
def home_sba_assist(self):
return getattr(self, "_home_sba_assist", EMPTY_FIELD)
@property
def caste(self):
return getattr(self, "_caste", EMPTY_FIELD)
def parse_date(self, date_string):
if date_string != EMPTY_FIELD and date_string != '' and date_string is not None:
try:
# assuming it's a date string or datetime string,
# DefaultProperty will wrap it as the correct type
# todo: there has to be a better way
return str(self.report.date_to_json(iso_string_to_datetime(date_string)))
except AttributeError:
return _("Bad date format!")
except TypeError:
return _("Bad date format!")
else:
return EMPTY_FIELD
class MCHMotherDisplay(MCHDisplay):
def __init__(self, report, case_dict):
case = CommCareCase.get(case_dict["_id"])
forms = case.get_forms()
jsy_beneficiary = None
jsy_money = None
pnc_on_time_statuses = []
for form in forms:
form_dict = form.form
form_xmlns = form_dict["@xmlns"]
if NEW in form_xmlns:
setattr(self, "_caste", get_property(form_dict, "caste"))
elif DELIVERY in form_xmlns:
if get_property(form_dict, "where_born") != 'home':
setattr(self, "_home_sba_assist", get_property(form_dict, "where_born"))
else:
setattr(self, "_home_sba_assist", get_property(form_dict, "home_sba_assist"))
setattr(self, "_delivery_nature", get_property(form_dict, "delivery_nature"))
setattr(self, "_discharge_date", get_property(form_dict, "discharge_date"))
setattr(self, "_jsy_money_date", get_property(form_dict, "jsy_money_date"))
setattr(self, "_delivery_complications", get_property(form_dict, "delivery_complications"))
if 'case' in form_dict and 'update' in form_dict['case']:
setattr(self, "_family_planning_type", get_property(form_dict['case']['update'], "family_planning_type"))
jsy_money = get_property(form_dict, "jsy_money")
children_count = int(get_property(form_dict, "cast_num_children", 0))
if children_count == 0:
setattr(self, "_num_children", 'still_birth')
else:
setattr(self, "_num_children", children_count)
child_list = []
if children_count == 1 and "child_info" in form_dict:
child_list.append(form_dict["child_info"])
elif children_count > 1 and "child_info" in form_dict:
child_list = form_dict["child_info"]
for idx,child in enumerate(child_list):
case_child = {}
if "case" in child:
case_child = CommCareCase.get(child["case"]["@case_id"])
setattr(self, "_first_weight_%s" % (idx+1), str(get_property(child, "first_weight")))
setattr(self, "_breastfed_hour_%s" % (idx+1), get_property(child, "breastfed_hour"))
if case_child:
setattr(self, "_case_name_%s" % (idx+1), get_property(case_child, "name"))
setattr(self, "_gender_%s" % (idx+1), get_property(case_child, "gender"))
elif REGISTRATION in form_xmlns:
jsy_beneficiary = get_property(form_dict, "jsy_beneficiary")
elif PNC in form_xmlns:
child_list = []
if isinstance(form_dict["child_info"], list):
child_list.extend(form_dict["child_info"])
else:
child_list.append(form_dict["child_info"])
for child in child_list:
pnc_on_time_status = None
if (get_property(child, 'skin_to_skin') == 'yes' or get_property(child, 'wrapped') == 'yes') and get_property(child, 'warm_to_touch') == 'yes':
pnc_on_time_status = 'yes'
else:
pnc_on_time_status = 'no'
pnc_on_time_statuses.append(pnc_on_time_status)
elif BP in form_xmlns:
if "bp1" in form_dict:
bp = form_dict["bp1"]
for i in range(1, 5):
if "anc%s" % i in bp:
anc = bp["anc%s" % i]
if "anc%s_blood_pressure" % i in anc:
if anc["anc%s_blood_pressure" % i] == 'high_bloodpressure':
anc["anc%s_blood_pressure" % i] = 'high'
setattr(self, "_blood_pressure_%s" % i, anc["anc%s_blood_pressure" % i])
if "anc%s_weight" % i in anc:
setattr(self, "_weight_%s" % i, str(anc["anc%s_weight" % i]))
if "anc%s_hemoglobin" % i in anc and i == 1:
setattr(self, "_hemoglobin", anc["anc%s_hemoglobin" % i])
setattr(self, "_anemia", get_property(bp, "anaemia"))
if "bp2" in form_dict:
bp = form_dict["bp2"]
setattr(self, "_rti_sti", get_property(bp, "rti_sti"))
setattr(self, "_complications", get_property(form_dict, "bp_complications"))
elif MTB_ABORT in form_xmlns:
setattr(self, "_abortion_type", get_property(form_dict, "abortion_type"))
if jsy_beneficiary is not None and jsy_beneficiary != EMPTY_FIELD:
setattr(self, "_jsy_beneficiary", jsy_beneficiary)
else:
setattr(self, "_jsy_beneficiary", jsy_money)
if len(pnc_on_time_statuses) > 0:
if 'yes' in pnc_on_time_statuses:
setattr(self, "_all_pnc_on_time", 'yes')
else:
setattr(self, "_all_pnc_on_time", 'no')
super(MCHMotherDisplay, self).__init__(report, case_dict)
@property
def mother_name(self):
return get_property(self.case, "mother_name")
@property
def husband_name(self):
return get_property(self.case, "husband_name")
@property
def ward_number(self):
return get_property(self.case, "ward_number")
@property
def mobile_number(self):
return get_property(self.case, "mobile_number")
@property
def mobile_number_whose(self):
number = get_property(self.case, "mobile_number_whose")
if re.match(r"^mobile_", number):
r = re.compile(r"^mobile_", re.IGNORECASE)
return r.sub("", number)
else:
return number
@property
def mcts_id(self):
return get_property(self.case, "full_mcts_id")
@property
def dob_age(self):
if "mother_dob" in self.case and self.case["mother_dob"]:
try:
mother_dob = self.case["mother_dob"]
if type(mother_dob) is dict:
mother_dob = mother_dob["#value"]
days = (date.today() - CaseDisplay.parse_date(self, mother_dob).date()).days
mother_dob = self.parse_date(mother_dob)
return "%s, %s" % (mother_dob, days/365)
except AttributeError:
return _("Bad date format!")
else:
return EMPTY_FIELD
@property
def lmp(self):
return self.parse_date(get_property(self.case, "lmp"))
@property
def edd(self):
return self.parse_date(get_property(self.case, "edd"))
@property
def anc_date_1(self):
return self.parse_date(get_property(self.case, "anc_1_date"))
@property
def anc_date_2(self):
return self.parse_date(get_property(self.case, "anc_2_date"))
@property
def anc_date_3(self):
return self.parse_date(get_property(self.case, "anc_3_date"))
@property
def anc_date_4(self):
return self.parse_date(get_property(self.case, "anc_4_date"))
@property
def tt1_date(self):
return self.parse_date(get_property(self.case, "tt_1_date"))
@property
def tt2_date(self):
return self.parse_date(get_property(self.case, "tt_2_date"))
@property
def tt_booster(self):
return self.parse_date(get_property(self.case, "tt_booster_date"))
@property
def ifa_tablets(self):
return self.parse_date(get_property(self.case, "ifa_tablets_100"))
@property
def add(self):
return self.parse_date(get_property(self.case, "add"))
@property
def first_pnc_time(self):
return get_property(self.case, "first_pnc_time")
@property
def status(self):
return get_property(self.case, "status")
@property
def jsy_beneficiary(self):
return getattr(self, "_jsy_beneficiary", EMPTY_FIELD)
@property
def delivery_nature(self):
return getattr(self, "_delivery_nature", EMPTY_FIELD)
@property
def discharge_date(self):
return self.parse_date(str(getattr(self, "_discharge_date", EMPTY_FIELD)))
@property
def jsy_money_date(self):
return self.parse_date(str(getattr(self, "_jsy_money_date", EMPTY_FIELD)))
@property
def delivery_complications(self):
return getattr(self, "_delivery_complications", EMPTY_FIELD)
@property
def family_planning_type(self):
return getattr(self, "_family_planning_type", EMPTY_FIELD)
@property
def anemia(self):
return getattr(self, "_anemia", EMPTY_FIELD)
@property
def complications(self):
return getattr(self, "_complications", EMPTY_FIELD)
@property
def rti_sti(self):
return getattr(self, "_rti_sti", EMPTY_FIELD)
@property
def abortion_type(self):
return getattr(self, "_abortion_type", EMPTY_FIELD)
@property
def blood_pressure_1(self):
return getattr(self, "_blood_pressure_1", EMPTY_FIELD)
@property
def blood_pressure_2(self):
return getattr(self, "_blood_pressure_2", EMPTY_FIELD)
@property
def blood_pressure_3(self):
return getattr(self, "_blood_pressure_3", EMPTY_FIELD)
@property
def blood_pressure_4(self):
return getattr(self, "_blood_pressure_4", EMPTY_FIELD)
@property
def weight_1(self):
return getattr(self, "_weight_1", EMPTY_FIELD)
@property
def weight_2(self):
return getattr(self, "_weight_2", EMPTY_FIELD)
@property
def weight_3(self):
return getattr(self, "_weight_3", EMPTY_FIELD)
@property
def weight_4(self):
return getattr(self, "_weight_4", EMPTY_FIELD)
@property
def hemoglobin(self):
return getattr(self, "_hemoglobin", EMPTY_FIELD)
@property
def anc_completed(self):
lmp = self.lmp
anc_date_1 = self.anc_date_1
if lmp != EMPTY_FIELD and anc_date_1 != EMPTY_FIELD:
try:
return _("yes") if CaseDisplay.parse_date(self, self.anc_date_1) < (CaseDisplay.parse_date(self, self.lmp) + timedelta(days=12*7)) else _("no")
except AttributeError:
return _("Bad date format!")
else:
return EMPTY_FIELD
@property
def all_pnc_on_time(self):
return getattr(self, "_all_pnc_on_time", EMPTY_FIELD)
@property
def num_children(self):
return getattr(self, "_num_children", EMPTY_FIELD)
@property
def case_name_1(self):
return getattr(self, "_case_name_1", EMPTY_FIELD)
@property
def case_name_2(self):
return getattr(self, "_case_name_2", EMPTY_FIELD)
@property
def case_name_3(self):
return getattr(self, "_case_name_3", EMPTY_FIELD)
@property
def case_name_4(self):
return getattr(self, "_case_name_4", EMPTY_FIELD)
@property
def gender_1(self):
return getattr(self, "_gender_1", EMPTY_FIELD)
@property
def gender_2(self):
return getattr(self, "_gender_2", EMPTY_FIELD)
@property
def gender_3(self):
return getattr(self, "_gender_3", EMPTY_FIELD)
@property
def gender_4(self):
return getattr(self, "_gender_4", EMPTY_FIELD)
@property
def first_weight_1(self):
return getattr(self, "_first_weight_1", EMPTY_FIELD)
@property
def first_weight_2(self):
return getattr(self, "_first_weight_2", EMPTY_FIELD)
@property
def first_weight_3(self):
return getattr(self, "_first_weight_3", EMPTY_FIELD)
@property
def first_weight_4(self):
return getattr(self, "_first_weight_4", EMPTY_FIELD)
@property
def breastfed_hour_1(self):
return getattr(self, "_breastfed_hour_1", EMPTY_FIELD)
@property
def breastfed_hour_2(self):
return getattr(self, "_breastfed_hour_2", EMPTY_FIELD)
@property
def breastfed_hour_3(self):
return getattr(self, "_breastfed_hour_3", EMPTY_FIELD)
@property
def breastfed_hour_4(self):
return getattr(self, "_breastfed_hour_4", EMPTY_FIELD)
class MCHChildDisplay(MCHDisplay):
def __init__(self, report, case_dict):
# get mother case
if len(case_dict["indices"]) > 0:
try:
parent_case = CommCareCase.get(case_dict["indices"][0]["referenced_id"])
forms = parent_case.get_forms()
parent_json = parent_case.case_properties()
setattr(self, "_father_mother_name", "%s, %s" %(get_property(parent_json,"husband_name"), get_property(parent_json, "mother_name")))
setattr(self, "_full_mcts_id", get_property(parent_json, "full_mcts_id"))
setattr(self, "_ward_number", get_property(parent_json, "ward_number"))
setattr(self, "_mobile_number", get_property(parent_case, 'mobile_number'))
number = get_property(parent_case, "mobile_number_whose")
if re.match(r"^mobile_", number):
r = re.compile(r"^mobile_", re.IGNORECASE)
setattr(self, "_mobile_number_whose", r.sub("", number))
else:
setattr(self, "_mobile_number_whose", number)
for form in forms:
form_dict = form.form
form_xmlns = form_dict["@xmlns"]
if NEW in form_xmlns:
setattr(self, "_caste", get_property(form_dict, "caste"))
elif DELIVERY in form_xmlns:
if get_property(form_dict, "where_born") != 'home':
setattr(self, "_home_sba_assist", get_property(form_dict, "where_born"))
else:
setattr(self, "_home_sba_assist", get_property(form_dict, "home_sba_assist"))
except ResourceNotFound:
logging.error("ResourceNotFound: " + case_dict["indices"][0]["referenced_id"])
super(MCHChildDisplay, self).__init__(report, case_dict)
@property
def child_name(self):
return get_property(self.case, "name")
@property
def father_mother_name(self):
return getattr(self, "_father_mother_name", EMPTY_FIELD)
@property
def mcts_id(self):
return getattr(self, "_full_mcts_id", EMPTY_FIELD)
@property
def ward_number(self):
return getattr(self, "_ward_number", EMPTY_FIELD)
@property
def gender(self):
return get_property(self.case, "gender")
@property
def mobile_number(self):
return getattr(self, "_mobile_number", EMPTY_FIELD)
@property
def mobile_number_whose(self):
return getattr(self, "_mobile_number_whose", EMPTY_FIELD)
@property
def bcg_date(self):
return self.parse_date(get_property(self.case, "bcg_date"))
@property
def opv_0_date(self):
return self.parse_date(get_property(self.case, "opv_0_date"))
@property
def hep_b_0_date(self):
return self.parse_date(get_property(self.case, "hep_b_0_date"))
@property
def dpt_1_date(self):
return self.parse_date(get_property(self.case, "dpt_1_date"))
@property
def opv_1_date(self):
return self.parse_date(get_property(self.case, "opv_1_date"))
@property
def hep_b_1_date(self):
return self.parse_date(get_property(self.case, "hep_b_1_date"))
@property
def dpt_2_date(self):
return self.parse_date(get_property(self.case, "dpt_2_date"))
@property
def opv_2_date(self):
return self.parse_date(get_property(self.case, "opv_2_date"))
@property
def hep_b_2_date(self):
return self.parse_date(get_property(self.case, "hep_b_2_date"))
@property
def dpt_3_date(self):
return self.parse_date(get_property(self.case, "dpt_3_date"))
@property
def opv_3_date(self):
return self.parse_date(get_property(self.case, "opv_3_date"))
@property
def hep_b_3_date(self):
return self.parse_date(get_property(self.case, "hep_b_3_date"))
@property
def measles_date(self):
return self.parse_date(get_property(self.case, "measles_date"))
@property
def vit_a_1_date(self):
return self.parse_date(get_property(self.case, "vit_a_1_date"))
@property
def date_measles_booster(self):
return self.parse_date(get_property(self.case, "date_measles_booster"))
@property
def dpt_booster_date(self):
return self.parse_date(get_property(self.case, "dpt_booster_date"))
@property
def opv_booster_date(self):
return self.parse_date(get_property(self.case, "opv_booster_date"))
@property
def vit_a_2_date(self):
return self.parse_date(get_property(self.case, "vit_a_2_date"))
@property
def vit_a_3_date(self):
return self.parse_date(get_property(self.case, "vit_a_3_date"))
@property
def date_je(self):
return self.parse_date(get_property(self.case, "date_je"))
@property
def dob_age(self):
if "dob" in self.case and self.case["dob"]:
try:
dob = self.case["dob"]
if type(dob) is dict:
dob = dob["#value"]
days = (date.today() - CaseDisplay.parse_date(self, dob).date()).days
dob = self.parse_date(dob)
return "%s, %s" % (dob, int(days/365.25))
except AttributeError:
return _("Bad date format!")
else:
return EMPTY_FIELD
| {
"content_hash": "2efa147e4e953bb356f8191b17bcc98e",
"timestamp": "",
"source": "github",
"line_count": 635,
"max_line_length": 178,
"avg_line_length": 34.809448818897636,
"alnum_prop": 0.57338038364097,
"repo_name": "puttarajubr/commcare-hq",
"id": "eadfe1aa8bf19f295d83849e9ef62db9f03f6a4f",
"size": "22104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/bihar/reports/display.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
import os
import json
import grequests
import urlparse
import abstract_backend as sdm_absbackends
import util as sdm_util
DEFAULT_REST_HOSTS = ["http://localhost:8888"]
DEFAULT_MOUNT_PATH = "hsyn:///"
class RestBackendException(sdm_absbackends.AbstractBackendException):
pass
class RestBackendConfig(sdm_absbackends.AbstractBackendConfig):
"""
REST Backend Config
"""
def __init__(self):
self.default_mount_path = DEFAULT_MOUNT_PATH
self.rest_hosts = DEFAULT_REST_HOSTS
@classmethod
def from_dict(cls, d):
config = RestBackendConfig()
config.default_mount_path = d["default_mount_path"]
config.rest_hosts = d["rest_hosts"]
return config
@classmethod
def get_default_config(cls):
return RestBackendConfig()
def to_json(self):
return json.dumps({
"default_mount_path": self.default_mount_path,
"rest_hosts": self.rest_hosts
})
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "<RestBackendConfig %s>" % \
(self.rest_hosts)
class RestBackend(sdm_absbackends.AbstractBackend):
"""
REST Backend
"""
def __init__(self, backend_config):
self.backend_config = backend_config
@classmethod
def get_name(cls):
return "REST"
def is_legal_mount_path(self, mount_path):
parts = urlparse.urlparse(mount_path)
if not parts.scheme:
return False
session_name = self._get_session_name(parts.path)
if len(session_name) == 0:
return False
return True
def make_default_mount_path(self, dataset, default_mount_path):
parts = urlparse.urlparse(default_mount_path)
if not parts.scheme:
raise RestBackendException("cannot make default mount path for %s" % dataset)
mount_path = "%s://%s/%s" % (
parts.scheme,
parts.netloc,
dataset.strip().lower()
)
abs_mount_path = sdm_util.get_abs_path(mount_path)
return abs_mount_path
def _get_session_name(self, mount_path):
parts = urlparse.urlparse(mount_path)
path = parts.path.lstrip("/")
idx = path.find("/")
if idx > 0:
return path[:idx]
return path
def _raise_error_on_http_error(self, status_code):
if status_code >= 400 and status_code <= 599:
raise RestBackendException("received a http error - code %s" % status_code)
def _check_syndicate_user(self, rest_host, mount_id):
try:
url = "%s/user/check" % rest_host
params = {
"mount_id": mount_id
}
sdm_util.log_message("Sending a HTTP GET request : %s" % url)
req = [grequests.get(url, params=params)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
return sdm_util.to_bool(result["result"])
except Exception, e:
raise RestBackendException("cannot check user : %s" % e)
def _check_syndicate_user_multi(self, rest_hosts, mount_id):
try:
params = {
"mount_id": mount_id
}
reqs = []
for rest_host in rest_hosts:
url = "%s/user/check" % rest_host
sdm_util.log_message("Sending a HTTP GET request : %s" % url)
req = grequests.get(url, params=params)
reqs.append(req)
ress = grequests.map(set(reqs))
results = {}
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
rest_host = rest_hosts[idx]
results[rest_host] = sdm_util.to_bool(result["result"])
idx += 1
return results
except Exception, e:
raise RestBackendException("cannot check user : %s" % e)
def _regist_syndicate_user(self, rest_host, mount_id, dataset, username, user_pkey, gateway_name, ms_host):
# check if mount_id already exists
skip_config = False
if self._check_syndicate_user(rest_host, mount_id):
skip_config = True
if not skip_config:
sdm_util.log_message("Setting up Syndicate for an user, %s" % username)
try:
# register
url = "%s/user/setup" % rest_host
values = {
"ms_url": ms_host,
"user": username,
"mount_id": mount_id,
"cert": user_pkey
}
sdm_util.log_message("Sending a HTTP POST request : %s" % url)
req = [grequests.post(url, data=values)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot setup Syndicate for an user, %s : %s" % (username, r))
sdm_util.log_message("Successfully set up Syndicate for an user, %s" % username)
except Exception, e:
raise RestBackendException("cannot setup Syndicate for an user, %s : %s" % (username, e))
def _regist_syndicate_user_multi(self, rest_hosts, mount_id, dataset, username, user_pkey, gateway_name, ms_host):
# check if mount_id already exists
check_results = self._check_syndicate_user_multi(rest_hosts, mount_id)
target_rest_hosts = []
for rest_host in rest_hosts:
if not check_results[rest_host]:
target_rest_hosts.append(rest_host)
if len(target_rest_hosts) > 0:
try:
# register
sdm_util.log_message("Setting up Syndicate for an user, %s" % username)
values = {
"ms_url": ms_host,
"user": username,
"mount_id": mount_id,
"cert": user_pkey
}
reqs = []
for rest_host in target_rest_hosts:
# for hosts who returned False at check
url = "%s/user/setup" % rest_host
sdm_util.log_message("Sending a HTTP POST request : %s" % url)
req = grequests.post(url, data=values)
reqs.append(req)
ress = grequests.map(set(reqs))
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot setup Syndicate for an user, %s - %s : %s" % (target_rest_hosts[idx], username, r))
idx += 1
sdm_util.log_message("Successfully set up Syndicate for an user, %s" % username)
except Exception, e:
raise RestBackendException("cannot setup Syndicate for an user, %s : %s" % (username, e))
def _delete_syndicate_user(self, rest_host, mount_id):
# check if mount_id already exists
skip_delete = True
if self._check_syndicate_user(rest_host, mount_id):
skip_delete = False
if not skip_delete:
sdm_util.log_message("Deleting an user, %s" % mount_id)
try:
# delete
url = "%s/user/delete" % rest_host
params = {
"mount_id": mount_id
}
sdm_util.log_message("Sending a HTTP DELETE request : %s" % url)
req = [grequests.delete(url, params=params)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot delete an user : %s - " % r)
except Exception, e:
raise RestBackendException("cannot delete an user : %s" % e)
def _delete_syndicate_user_multi(self, rest_hosts, mount_id):
# check if mount_id already exists
check_results = self._check_syndicate_user_multi(rest_hosts, mount_id)
target_rest_hosts = []
for rest_host in rest_hosts:
if check_results[rest_host]:
target_rest_hosts.append(rest_host)
if len(target_rest_hosts) > 0:
try:
# delete
sdm_util.log_message("Deleting an user, %s" % mount_id)
params = {
"mount_id": mount_id
}
reqs = []
for rest_host in rest_hosts:
url = "%s/user/delete" % rest_host
sdm_util.log_message("Sending a HTTP DELETE request : %s" % url)
req = grequests.delete(url, params=params)
reqs.append(req)
ress = grequests.map(set(reqs))
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot delete an user : %s - %s" % (rest_hosts[idx], r))
idx += 1
except Exception, e:
raise RestBackendException("cannot delete an user : %s" % e)
def _check_syndicate_gateway(self, rest_host, session_name):
try:
success = True
url = "%s/gateway/check" % rest_host
params = {
"session_name": session_name
}
req = [grequests.get(url, params=params)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
return sdm_util.to_bool(result["result"])
except Exception, e:
raise RestBackendException("cannot check mount : %s" % e)
def _check_syndicate_gateway_multi(self, rest_hosts, session_name):
try:
params = {
"session_name": session_name
}
reqs = []
for rest_host in rest_hosts:
url = "%s/gateway/check" % rest_host
sdm_util.log_message("Sending a HTTP GET request : %s" % url)
req = grequests.get(url, params=params)
reqs.append(req)
ress = grequests.map(set(reqs))
results = {}
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
rest_host = rest_hosts[idx]
results[rest_host] = sdm_util.to_bool(result["result"])
idx += 1
return results
except Exception, e:
raise RestBackendException("cannot check mount : %s" % e)
def _regist_syndicate_gateway(self, rest_host, mount_id, dataset, gateway_name, session_name):
# check if session_name already exists
skip_config = False
if self._check_syndicate_gateway(rest_host, session_name):
skip_config = True
if not skip_config:
sdm_util.log_message("Registering a syndicate gateway, %s for %s" % (gateway_name, dataset))
try:
# register
url = "%s/gateway/setup" % rest_host
values = {
"mount_id": mount_id,
"session_name": session_name,
"session_key": dataset,
"volume": dataset,
"gateway": gateway_name,
"anonymous": "true"
}
sdm_util.log_message("Sending a HTTP POST request : %s" % url)
req = [grequests.post(url, data=values)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot register a syndicate gateway, %s for %s : %s" % (gateway_name, dataset, r))
sdm_util.log_message("Successfully registered a syndicate gateway, %s for %s" % (gateway_name, dataset))
except Exception, e:
raise RestBackendException("cannot register a syndicate gateway, %s for %s : %s" % (gateway_name, dataset, e))
def _regist_syndicate_gateway_multi(self, rest_hosts, mount_id, dataset, gateway_name, session_name):
# check if session_name already exists
check_results = self._check_syndicate_gateway_multi(rest_hosts, session_name)
target_rest_hosts = []
for rest_host in rest_hosts:
if not check_results[rest_host]:
target_rest_hosts.append(rest_host)
if len(target_rest_hosts) > 0:
try:
# register
sdm_util.log_message("Registering a syndicate gateway, %s for %s" % (gateway_name, dataset))
values = {
"mount_id": mount_id,
"session_name": session_name,
"session_key": dataset,
"volume": dataset,
"gateway": gateway_name,
"anonymous": "true"
}
reqs = []
for rest_host in target_rest_hosts:
# for hosts who returned False at check
url = "%s/gateway/setup" % rest_host
sdm_util.log_message("Sending a HTTP POST request : %s" % url)
req = grequests.post(url, data=values)
reqs.append(req)
ress = grequests.map(set(reqs))
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot register a syndicate gateway, %s - %s for %s : %s" % (target_rest_hosts[idx], gateway_name, dataset, r))
idx += 1
sdm_util.log_message("Successfully registered a syndicate gateway, %s for %s" % (gateway_name, dataset))
except Exception, e:
raise RestBackendException("cannot register a syndicate gateway, %s for %s : %s" % (gateway_name, dataset, e))
def _delete_syndicate_gateway(self, rest_host, mount_id, dataset, session_name):
# check if session_name already exists
skip_delete = True
if self._check_syndicate_gateway(rest_host, session_name):
skip_delete = False
if not skip_delete:
sdm_util.log_message("Deleting a syndicate gateway, %s" % (session_name))
try:
# delete
url = "%s/gateway/delete" % rest_host
params = {
"session_name": session_name,
"session_key": dataset
}
sdm_util.log_message("Sending a HTTP DELETE request : %s" % url)
req = [grequests.delete(url, params=params)]
res = grequests.map(set(req))[0]
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot delete gateway : %s - " % r)
except Exception, e:
raise RestBackendException("cannot delete gateway : %s" % e)
def _delete_syndicate_gateway_multi(self, rest_hosts, mount_id, dataset, session_name):
# check if session_name already exists
check_results = self._check_syndicate_gateway_multi(rest_hosts, session_name)
target_rest_hosts = []
for rest_host in rest_hosts:
if check_results[rest_host]:
target_rest_hosts.append(rest_host)
if len(target_rest_hosts) > 0:
try:
sdm_util.log_message("Deleting a syndicate gateway, %s" % (session_name))
params = {
"session_name": session_name,
"session_key": dataset
}
reqs = []
for rest_host in rest_hosts:
url = "%s/gateway/delete" % rest_host
sdm_util.log_message("Sending a HTTP DELETE request : %s" % url)
req = grequests.delete(url, params=params)
reqs.append(req)
ress = grequests.map(set(reqs))
idx = 0
for res in ress:
self._raise_error_on_http_error(res.status_code)
result = res.json()
sdm_util.log_message("> RETURN : %s" % result, sdm_util.LogLevel.DEBUG)
r = sdm_util.to_bool(result["result"])
if not r:
raise RestBackendException("cannot delete gateway : %s - %s" % (rest_hosts[idx], r))
idx += 1
except Exception, e:
raise RestBackendException("cannot delete gateway : %s" % e)
def mount(self, mount_id, ms_host, dataset, username, user_pkey, gateway_name, mount_path):
sdm_util.print_message("Mounting a dataset %s to %s" % (dataset, mount_path), True)
session_name = self._get_session_name(mount_path)
self._regist_syndicate_user_multi(self.backend_config.rest_hosts, mount_id, dataset, username, user_pkey, gateway_name, ms_host)
self._regist_syndicate_gateway_multi(self.backend_config.rest_hosts, mount_id, dataset, gateway_name, session_name)
sdm_util.print_message("A dataset %s is mounted to %s" % (dataset, mount_path), True)
def check_mount(self, mount_id, dataset, mount_path):
session_name = self._get_session_name(mount_path)
try:
results = self._check_syndicate_gateway_multi(self.backend_config.rest_hosts, session_name)
result = True
for r_result in results.values():
if not r_result:
result = False
break
return result
except RestBackendException, e:
return False
def unmount(self, mount_id, dataset, mount_path, cleanup=False):
sdm_util.print_message("Unmounting a dataset %s mounted at %s" % (dataset, mount_path), True)
session_name = self._get_session_name(mount_path)
self._delete_syndicate_gateway_multi(self.backend_config.rest_hosts, mount_id, dataset, session_name)
if cleanup:
self._delete_syndicate_user_multi(self.backend_config.rest_hosts, mount_id)
sdm_util.print_message("Successfully unmounted a dataset %s mounted at %s" % (dataset, mount_path), True)
| {
"content_hash": "49bbdc0cca9aef39ce672ab13330a3a9",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 163,
"avg_line_length": 41.227364185110666,
"alnum_prop": 0.5291849682772084,
"repo_name": "syndicate-storage/syndicate-dataset-manager",
"id": "ab414a03e59f7eb220dcf0886e3040fc1c473a4e",
"size": "21242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sdm/rest_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86081"
}
],
"symlink_target": ""
} |
import sys
import logging
try:
import ldap
import ldap.filter
ldap.set_option(ldap.OPT_REFERRALS, 0)
except Exception, e:
logging.error('missing ldap, try "easy_install python-ldap"')
raise e
def ldap_auth(server='ldap', port=None,
base_dn='ou=users,dc=domain,dc=com',
mode='uid', secure=False, cert_path=None, cert_file=None,
bind_dn=None, bind_pw=None, filterstr='objectClass=*',
username_attrib='uid',
custom_scope='subtree',
allowed_groups=None,
manage_user=False,
user_firstname_attrib='cn:1',
user_lastname_attrib='cn:2',
user_mail_attrib='mail',
manage_groups=False,
db=None,
group_dn=None,
group_name_attrib='cn',
group_member_attrib='memberUid',
group_filterstr='objectClass=*',
logging_level='error'):
"""
to use ldap login with MS Active Directory:
from gluon.contrib.login_methods.ldap_auth import ldap_auth
auth.settings.login_methods.append(ldap_auth(
mode='ad', server='my.domain.controller',
base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with Notes Domino:
auth.settings.login_methods.append(ldap_auth(
mode='domino',server='my.domino.server'))
to use ldap login with OpenLDAP:
auth.settings.login_methods.append(ldap_auth(
server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com'))
to use ldap login with OpenLDAP and subtree search and (optionally)
multiple DNs:
auth.settings.login_methods.append(ldap_auth(
mode='uid_r', server='my.ldap.server',
base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com']))
or (if using CN):
auth.settings.login_methods.append(ldap_auth(
mode='cn', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com'))
or you can full customize the search for user:
auth.settings.login_methods.append(ldap_auth(
mode='custom', server='my.ldap.server',
base_dn='ou=Users,dc=domain,dc=com',
username_attrib='uid',
custom_scope='subtree'))
the custom_scope can be: base, onelevel, subtree.
If using secure ldaps:// pass secure=True and cert_path="..."
If ldap is using GnuTLS then you need cert_file="..." instead cert_path
because cert_path isn't implemented in GnuTLS :(
If you need to bind to the directory with an admin account in order to
search it then specify bind_dn & bind_pw to use for this.
- currently only implemented for Active Directory
If you need to restrict the set of allowed users (e.g. to members of a
department) then specify an rfc4515 search filter string.
- currently only implemented for mode in ['ad', 'company', 'uid_r']
You can manage user attributes first name, last name, email from ldap:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_user=True,
user_firstname_attrib='cn:1',
user_lastname_attrib='cn:2',
user_mail_attrib='mail'
))
Where:
manage_user - let web2py handle user data from ldap
user_firstname_attrib - the attribute containing the user's first name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:1'='John'
user_lastname_attrib - the attribute containing the user's last name
optionally you can specify parts.
Example: cn: "John Smith" - 'cn:2'='Smith'
user_mail_attrib - the attribute containing the user's email address
If you need group control from ldap to web2py app's database feel free
to set:
auth.settings.login_methods.append(ldap_auth(...as usual...,
manage_groups=True,
db=db,
group_dn='ou=Groups,dc=domain,dc=com',
group_name_attrib='cn',
group_member_attrib='memberUid',
group_filterstr='objectClass=*'
))
Where:
manage_group - let web2py handle the groups from ldap
db - is the database object (need to have auth_user, auth_group,
auth_membership)
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attribute containing the group members name
group_filterstr - as the filterstr but for group select
You can restrict login access to specific groups if you specify:
auth.settings.login_methods.append(ldap_auth(...as usual...,
allowed_groups=[...],
group_dn='ou=Groups,dc=domain,dc=com',
group_name_attrib='cn',
group_member_attrib='memberUid',#use 'member' for Active Directory
group_filterstr='objectClass=*'
))
Where:
allowed_groups - a list with allowed ldap group names
group_dn - the ldap branch of the groups
group_name_attrib - the attribute where the group name is stored
group_member_attrib - the attribute containing the group members name
group_filterstr - as the filterstr but for group select
If using Active Directory you must specify bind_dn and bind_pw for
allowed_groups unless anonymous bind works.
You can set the logging level with the "logging_level" parameter, default
is "error" and can be set to error, warning, info, debug.
"""
logger = logging.getLogger('web2py.auth.ldap_auth')
if logging_level == 'error':
logger.setLevel(logging.ERROR)
elif logging_level == 'warning':
logger.setLevel(logging.WARNING)
elif logging_level == 'info':
logger.setLevel(logging.INFO)
elif logging_level == 'debug':
logger.setLevel(logging.DEBUG)
def ldap_auth_aux(username,
password,
ldap_server=server,
ldap_port=port,
ldap_basedn=base_dn,
ldap_mode=mode,
ldap_binddn=bind_dn,
ldap_bindpw=bind_pw,
secure=secure,
cert_path=cert_path,
cert_file=cert_file,
filterstr=filterstr,
username_attrib=username_attrib,
custom_scope=custom_scope,
manage_user=manage_user,
user_firstname_attrib=user_firstname_attrib,
user_lastname_attrib=user_lastname_attrib,
user_mail_attrib=user_mail_attrib,
manage_groups=manage_groups,
allowed_groups=allowed_groups,
db=db):
if password == '': # http://tools.ietf.org/html/rfc4513#section-5.1.2
logger.warning('blank password not allowed')
return False
logger.debug('mode: [%s] manage_user: [%s] custom_scope: [%s]'
' manage_groups: [%s]' % (str(mode), str(manage_user),
str(custom_scope), str(manage_groups)))
if manage_user:
if user_firstname_attrib.count(':') > 0:
(user_firstname_attrib,
user_firstname_part) = user_firstname_attrib.split(':', 1)
user_firstname_part = (int(user_firstname_part) - 1)
else:
user_firstname_part = None
if user_lastname_attrib.count(':') > 0:
(user_lastname_attrib,
user_lastname_part) = user_lastname_attrib.split(':', 1)
user_lastname_part = (int(user_lastname_part) - 1)
else:
user_lastname_part = None
user_firstname_attrib = ldap.filter.escape_filter_chars(
user_firstname_attrib)
user_lastname_attrib = ldap.filter.escape_filter_chars(
user_lastname_attrib)
user_mail_attrib = ldap.filter.escape_filter_chars(
user_mail_attrib)
try:
if allowed_groups:
if not is_user_in_allowed_groups(username, password):
return False
con = init_ldap()
if ldap_mode == 'ad':
# Microsoft Active Directory
if '@' not in username:
domain = []
for x in ldap_basedn.split(','):
if "DC=" in x.upper():
domain.append(x.split('=')[-1])
username = "%s@%s" % (username, '.'.join(domain))
username_bare = username.split("@")[0]
con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
# In cases where ForestDnsZones and DomainDnsZones are found,
# result will look like the following:
# ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
# DC=domain,DC=com']
if ldap_binddn:
# need to search directory with an admin account 1st
con.simple_bind_s(ldap_binddn, ldap_bindpw)
else:
# credentials should be in the form of [email protected]
con.simple_bind_s(username, password)
# this will throw an index error if the account is not found
# in the ldap_basedn
requested_attrs = ['sAMAccountName']
if manage_user:
requested_attrs.extend([user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])
result = con.search_ext_s(
ldap_basedn, ldap.SCOPE_SUBTREE,
"(&(sAMAccountName=%s)(%s))" % (
ldap.filter.escape_filter_chars(username_bare),
filterstr),
requested_attrs)[0][1]
if not isinstance(result, dict):
# result should be a dict in the form
# {'sAMAccountName': [username_bare]}
logger.warning('User [%s] not found!' % username)
return False
if ldap_binddn:
# We know the user exists & is in the correct OU
# so now we just check the password
con.simple_bind_s(username, password)
username = username_bare
if ldap_mode == 'domino':
# Notes Domino
if "@" in username:
username = username.split("@")[0]
con.simple_bind_s(username, password)
if manage_user:
# TODO: sorry I have no clue how to query attrs in domino
result = {user_firstname_attrib: username,
user_lastname_attrib: None,
user_mail_attrib: None}
if ldap_mode == 'cn':
# OpenLDAP (CN)
dn = "cn=" + username + "," + ldap_basedn
con.simple_bind_s(dn, password)
if manage_user:
result = con.search_s(dn, ldap.SCOPE_BASE,
"(objectClass=*)",
[user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])[0][1]
if ldap_mode == 'uid':
# OpenLDAP (UID)
dn = "uid=" + username + "," + ldap_basedn
con.simple_bind_s(dn, password)
if manage_user:
result = con.search_s(dn, ldap.SCOPE_BASE,
"(objectClass=*)",
[user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])[0][1]
if ldap_mode == 'company':
# no DNs or password needed to search directory
dn = ""
pw = ""
# bind anonymously
con.simple_bind_s(dn, pw)
# search by e-mail address
filter = '(&(mail=%s)(%s))' % (
ldap.filter.escape_filter_chars(username),
filterstr)
# find the uid
attrs = ['uid']
if manage_user:
attrs.extend([user_firstname_attrib,
user_lastname_attrib,
user_mail_attrib])
# perform the actual search
company_search_result = con.search_s(ldap_basedn,
ldap.SCOPE_SUBTREE,
filter, attrs)
dn = company_search_result[0][0]
result = company_search_result[0][1]
# perform the real authentication test
con.simple_bind_s(dn, password)
if ldap_mode == 'uid_r':
# OpenLDAP (UID) with subtree search and multiple DNs
if isinstance(ldap_basedn, list):
basedns = ldap_basedn
else:
basedns = [ldap_basedn]
filter = '(&(uid=%s)(%s))' % (
ldap.filter.escape_filter_chars(username), filterstr)
found = False
for basedn in basedns:
try:
result = con.search_s(basedn, ldap.SCOPE_SUBTREE,
filter)
if result:
user_dn = result[0][0]
# Check the password
con.simple_bind_s(user_dn, password)
found = True
break
except ldap.LDAPError, detail:
(exc_type, exc_value) = sys.exc_info()[:2]
logger.warning(
"ldap_auth: searching %s for %s resulted in %s: %s\n" %
(basedn, filter, exc_type, exc_value)
)
if not found:
logger.warning('User [%s] not found!' % username)
return False
result = result[0][1]
if ldap_mode == 'custom':
# OpenLDAP (username_attrs) with subtree search and
# multiple DNs
if isinstance(ldap_basedn, list):
basedns = ldap_basedn
else:
basedns = [ldap_basedn]
filter = '(&(%s=%s)(%s))' % (username_attrib,
ldap.filter.escape_filter_chars(
username),
filterstr)
if custom_scope == 'subtree':
ldap_scope = ldap.SCOPE_SUBTREE
elif custom_scope == 'base':
ldap_scope = ldap.SCOPE_BASE
elif custom_scope == 'onelevel':
ldap_scope = ldap.SCOPE_ONELEVEL
found = False
for basedn in basedns:
try:
result = con.search_s(basedn, ldap_scope, filter)
if result:
user_dn = result[0][0]
# Check the password
con.simple_bind_s(user_dn, password)
found = True
break
except ldap.LDAPError, detail:
(exc_type, exc_value) = sys.exc_info()[:2]
logger.warning(
"ldap_auth: searching %s for %s resulted in %s: %s\n" %
(basedn, filter, exc_type, exc_value)
)
if not found:
logger.warning('User [%s] not found!' % username)
return False
result = result[0][1]
if manage_user:
logger.info('[%s] Manage user data' % str(username))
try:
if user_firstname_part is not None:
store_user_firstname = result[user_firstname_attrib][
0].split(' ', 1)[user_firstname_part]
else:
store_user_firstname = result[user_firstname_attrib][0]
except KeyError, e:
store_user_firstname = None
try:
if user_lastname_part is not None:
store_user_lastname = result[user_lastname_attrib][
0].split(' ', 1)[user_lastname_part]
else:
store_user_lastname = result[user_lastname_attrib][0]
except KeyError, e:
store_user_lastname = None
try:
store_user_mail = result[user_mail_attrib][0]
except KeyError, e:
store_user_mail = None
try:
#
# user as username
# #################
user_in_db = db(db.auth_user.username == username)
if user_in_db.count() > 0:
user_in_db.update(first_name=store_user_firstname,
last_name=store_user_lastname,
email=store_user_mail)
else:
db.auth_user.insert(first_name=store_user_firstname,
last_name=store_user_lastname,
email=store_user_mail,
username=username)
except:
#
# user as email
# ##############
user_in_db = db(db.auth_user.email == username)
if user_in_db.count() > 0:
user_in_db.update(first_name=store_user_firstname,
last_name=store_user_lastname)
else:
db.auth_user.insert(first_name=store_user_firstname,
last_name=store_user_lastname,
email=username)
con.unbind()
if manage_groups:
if not do_manage_groups(username, password):
return False
return True
except ldap.INVALID_CREDENTIALS, e:
return False
except ldap.LDAPError, e:
import traceback
logger.warning('[%s] Error in ldap processing' % str(username))
logger.debug(traceback.format_exc())
return False
except IndexError, ex: # for AD membership test
import traceback
logger.warning('[%s] Ldap result indexing error' % str(username))
logger.debug(traceback.format_exc())
return False
def is_user_in_allowed_groups(username,
password=None,
allowed_groups=allowed_groups):
"""
Figure out if the username is a member of an allowed group
in ldap or not
"""
#
# Get all group name where the user is in actually in ldap
# #########################################################
ldap_groups_of_the_user = get_user_groups_from_ldap(username, password)
# search for allowed group names
if type(allowed_groups) != type(list()):
allowed_groups = [allowed_groups]
for group in allowed_groups:
if ldap_groups_of_the_user.count(group) > 0:
# Match
return True
# No match
return False
def do_manage_groups(username,
password=None,
db=db):
"""
Manage user groups
Get all user's group from ldap and refresh the already stored
ones in web2py's application database or create new groups
according to ldap.
"""
logger.info('[%s] Manage user groups' % str(username))
try:
#
# Get all group name where the user is in actually in ldap
# #########################################################
ldap_groups_of_the_user = get_user_groups_from_ldap(
username, password)
#
# Get all group name where the user is in actually in local db
# #############################################################
try:
db_user_id = db(db.auth_user.username == username).select(
db.auth_user.id).first().id
except:
try:
db_user_id = db(db.auth_user.email == username).select(
db.auth_user.id).first().id
except AttributeError, e:
#
# There is no user in local db
# We create one
# ##############################
try:
db_user_id = db.auth_user.insert(username=username,
first_name=username)
except AttributeError, e:
db_user_id = db.auth_user.insert(email=username,
first_name=username)
if not db_user_id:
logging.error(
'There is no username or email for %s!' % username)
raise
db_group_search = db((db.auth_membership.user_id == db_user_id) &
(db.auth_user.id == db.auth_membership.user_id) &
(db.auth_group.id == db.auth_membership.group_id))
db_groups_of_the_user = list()
db_group_id = dict()
if db_group_search.count() > 0:
for group in db_group_search.select(db.auth_group.id,
db.auth_group.role,
distinct=True):
db_group_id[group.role] = group.id
db_groups_of_the_user.append(group.role)
logging.debug('db groups of user %s: %s' %
(username, str(db_groups_of_the_user)))
#
# Delete user membership from groups where user is not anymore
# #############################################################
for group_to_del in db_groups_of_the_user:
if ldap_groups_of_the_user.count(group_to_del) == 0:
db((db.auth_membership.user_id == db_user_id) &
(db.auth_membership.group_id == \
db_group_id[group_to_del])).delete()
#
# Create user membership in groups where user is not in already
# ##############################################################
for group_to_add in ldap_groups_of_the_user:
if db_groups_of_the_user.count(group_to_add) == 0:
if db(db.auth_group.role == group_to_add).count() == 0:
gid = db.auth_group.insert(role=group_to_add,
description='Generated from LDAP')
else:
gid = db(db.auth_group.role == group_to_add).select(
db.auth_group.id).first().id
db.auth_membership.insert(user_id=db_user_id,
group_id=gid)
except:
logger.warning("[%s] Groups are not managed successfully!" %
str(username))
import traceback
logger.debug(traceback.format_exc())
return False
return True
def init_ldap(ldap_server=server,
ldap_port=port,
ldap_basedn=base_dn,
ldap_mode=mode,
secure=secure,
cert_path=cert_path,
cert_file=cert_file):
"""
Inicialize ldap connection
"""
logger.info('[%s] Initialize ldap connection' % str(ldap_server))
if secure:
if not ldap_port:
ldap_port = 636
con = ldap.initialize(
"ldaps://" + ldap_server + ":" + str(ldap_port))
if cert_path:
con.set_option(ldap.OPT_X_TLS_CACERTDIR, cert_path)
if cert_file:
con.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
else:
if not ldap_port:
ldap_port = 389
con = ldap.initialize(
"ldap://" + ldap_server + ":" + str(ldap_port))
return con
def get_user_groups_from_ldap(username,
password=None,
base_dn=base_dn,
ldap_binddn=bind_dn,
ldap_bindpw=bind_pw,
group_dn=group_dn,
group_name_attrib=group_name_attrib,
group_member_attrib=group_member_attrib,
group_filterstr=group_filterstr,
ldap_mode=mode):
"""
Get all group names from ldap where the user is in
"""
logger.info('[%s] Get user groups from ldap' % str(username))
#
# Get all group name where the user is in actually in ldap
# #########################################################
# Initialize ldap
if not group_dn:
group_dn = base_dn
con = init_ldap()
logger.debug('Username init: [%s]' % username)
if ldap_mode == 'ad':
#
# Get the AD username
# ####################
if '@' not in username:
domain = []
for x in base_dn.split(','):
if "DC=" in x.upper():
domain.append(x.split('=')[-1])
username = "%s@%s" % (username, '.'.join(domain))
username_bare = username.split("@")[0]
con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
# In cases where ForestDnsZones and DomainDnsZones are found,
# result will look like the following:
# ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
# DC=domain,DC=com']
if ldap_binddn:
# need to search directory with an admin account 1st
con.simple_bind_s(ldap_binddn, ldap_bindpw)
logger.debug('Ldap bind connect...')
else:
# credentials should be in the form of [email protected]
con.simple_bind_s(username, password)
logger.debug('Ldap username connect...')
# We have to use the full string
username = con.search_ext_s(base_dn, ldap.SCOPE_SUBTREE,
"(&(sAMAccountName=%s)(%s))" %
(ldap.filter.escape_filter_chars(username_bare),
filterstr), ["cn"])[0][0]
else:
if ldap_binddn:
# need to search directory with an bind_dn account 1st
con.simple_bind_s(ldap_binddn, ldap_bindpw)
else:
# bind as anonymous
con.simple_bind_s('', '')
# if username is None, return empty list
if username is None:
return list()
# search for groups where user is in
filter = '(&(%s=%s)(%s))' % (ldap.filter.escape_filter_chars(
group_member_attrib
),
ldap.filter.escape_filter_chars(username),
group_filterstr)
group_search_result = con.search_s(group_dn,
ldap.SCOPE_SUBTREE,
filter, [group_name_attrib])
ldap_groups_of_the_user = list()
for group_row in group_search_result:
group = group_row[1]
if type(group) == dict and group.has_key(group_name_attrib):
ldap_groups_of_the_user.extend(group[group_name_attrib])
con.unbind()
logger.debug('User groups: %s' % ldap_groups_of_the_user)
return list(ldap_groups_of_the_user)
if filterstr[0] == '(' and filterstr[-1] == ')': # rfc4515 syntax
filterstr = filterstr[1:-1] # parens added again where used
return ldap_auth_aux
| {
"content_hash": "55eebe7eae3eb7f0e4ce6fa12bfe9ebc",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 79,
"avg_line_length": 45.02413273001508,
"alnum_prop": 0.46584704030015744,
"repo_name": "ericgriffin/fflock",
"id": "c374209e153fd184cc6e71325fc973d93a9b900b",
"size": "29943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web2py/gluon/contrib/login_methods/ldap_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "135005"
},
{
"name": "JavaScript",
"bytes": "827775"
},
{
"name": "PHP",
"bytes": "1296"
},
{
"name": "Python",
"bytes": "6011919"
},
{
"name": "Shell",
"bytes": "90328"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from calories.models import Food
class FoodSerializer(serializers.ModelSerializer):
class Meta:
fields = ('pk', 'name', 'calories')
model = Food
class FoodListSerializer(serializers.Serializer):
foods = serializers.ListField(child=FoodSerializer(many=True))
class Meta:
fields = ('foods')
class FoodRequestSerializer(serializers.Serializer):
'''
Used for getting data
'''
foods = serializers.CharField()
| {
"content_hash": "4a3b5df6328859f27e9d2270635e0dd1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.6992031872509961,
"repo_name": "banjocat/calorie-find",
"id": "2e2bfa67b2ab4ebf4f85d7108805fb44872b7c41",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/calorie_find/calories/serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "Python",
"bytes": "12387"
},
{
"name": "Shell",
"bytes": "3658"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutStrings(Koan):
def test_double_quoted_strings_are_strings(self):
string = "Hello, world."
self.assertEqual(True, isinstance(string, str))
def test_single_quoted_strings_are_also_strings(self):
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, str))
def test_triple_quote_strings_are_also_strings(self):
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, str))
def test_triple_single_quotes_work_too(self):
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, str))
def test_raw_strings_are_also_strings(self):
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, str))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
string = 'He said, "Go Away."'
self.assertEqual("He said, \"Go Away.\"", string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
string = "Don't"
self.assertEqual("Don\'t", string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
self.assertEqual(True, (a == b))
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
string = "It was the best of times,\n\
It was the worst of times."
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
string = """
Howdy,
world!
"""
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
string = """Hello "world\""""
self.assertEqual("Hello \"world\"", string)
def test_plus_concatenates_strings(self):
string = "Hello, " + "world"
self.assertEqual("Hello, world", string)
def test_adjacent_strings_are_concatenated_automatically(self):
string = "Hello" ", " "world"
self.assertEqual("Hello, world", string)
def test_plus_will_not_modify_original_strings(self):
hi = "Hello, "
there = "world"
string = hi + there
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
hi = "Hello, "
there = "world"
hi += there
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
original = "Hello, "
hi = original
there = "world"
hi += there
self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
self.assertEqual(1, len(string))
| {
"content_hash": "c0828aa78e54d00cb9aeac50fcf4378f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 85,
"avg_line_length": 33.59340659340659,
"alnum_prop": 0.6107294733398757,
"repo_name": "kimegitee/python-koans",
"id": "36439435cc0cd95b59b5e8d4676c23e49246d1ab",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "330527"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
import pygame, sys, random
from pygame.locals import *
def doRectsOverlap(rect1, rect2):
for a, b in [(rect1, rect2), (rect2, rect1)]:
# Check if a's corners are inside b
if ((isPointInsideRect(a.left, a.top, b)) or
(isPointInsideRect(a.left, a.bottom, b)) or
(isPointInsideRect(a.right, a.top, b)) or
(isPointInsideRect(a.right, a.bottom, b))):
return True
return False
def isPointInsideRect(x, y, rect):
if (x > rect.left) and (x < rect.right) and (y > rect.top) and (y < rect.bottom):
return True
else:
return False
# set up pygame
pygame.init()
mainClock = pygame.time.Clock()
# set up the window
WINDOWWIDTH = 400
WINDOWHEIGHT = 400
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Collision Detection')
# set up direction variables
DOWNLEFT = 1
DOWNRIGHT = 3
UPLEFT = 7
UPRIGHT = 9
MOVESPEED = 4
# set up the colors
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
# set up the bouncer and food data structures
foodCounter = 0
NEWFOOD = 40
FOODSIZE = 20
bouncer = {'rect':pygame.Rect(300, 100, 50, 50), 'dir':UPLEFT}
foods = []
for i in range(20):
foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - FOODSIZE), random.randint(0, WINDOWHEIGHT - FOODSIZE), FOODSIZE, FOODSIZE))
# run the game loop
while True:
# check for the QUIT event
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
foodCounter += 1
if foodCounter >= NEWFOOD:
# add new food
foodCounter = 0
foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - FOODSIZE), random.randint(0, WINDOWHEIGHT - FOODSIZE), FOODSIZE, FOODSIZE))
# draw the black background onto the surface
windowSurface.fill(BLACK)
# move the bouncer data structure
if bouncer['dir'] == DOWNLEFT:
bouncer['rect'].left -= MOVESPEED
bouncer['rect'].top += MOVESPEED
if bouncer['dir'] == DOWNRIGHT:
bouncer['rect'].left += MOVESPEED
bouncer['rect'].top += MOVESPEED
if bouncer['dir'] == UPLEFT:
bouncer['rect'].left -= MOVESPEED
bouncer['rect'].top -= MOVESPEED
if bouncer['dir'] == UPRIGHT:
bouncer['rect'].left += MOVESPEED
bouncer['rect'].top -= MOVESPEED
# check if the bouncer has move out of the window
if bouncer['rect'].top < 0:
# bouncer has moved past the top
if bouncer['dir'] == UPLEFT:
bouncer['dir'] = DOWNLEFT
if bouncer['dir'] == UPRIGHT:
bouncer['dir'] = DOWNRIGHT
if bouncer['rect'].bottom > WINDOWHEIGHT:
# bouncer has moved past the bottom
if bouncer['dir'] == DOWNLEFT:
bouncer['dir'] = UPLEFT
if bouncer['dir'] == DOWNRIGHT:
bouncer['dir'] = UPRIGHT
if bouncer['rect'].left < 0:
# bouncer has moved past the left side
if bouncer['dir'] == DOWNLEFT:
bouncer['dir'] = DOWNRIGHT
if bouncer['dir'] == UPLEFT:
bouncer['dir'] = UPRIGHT
if bouncer['rect'].right > WINDOWWIDTH:
# bouncer has moved past the right side
if bouncer['dir'] == DOWNRIGHT:
bouncer['dir'] = DOWNLEFT
if bouncer['dir'] == UPRIGHT:
bouncer['dir'] = UPLEFT
# draw the bouncer onto the surface
pygame.draw.rect(windowSurface, WHITE, bouncer['rect'])
# check if the bouncer has intersected with any food squares.
for food in foods[:]:
if doRectsOverlap(bouncer['rect'], food):
foods.remove(food)
# draw the food
for i in range(len(foods)):
pygame.draw.rect(windowSurface, GREEN, foods[i])
# draw the window onto the screen
pygame.display.update()
mainClock.tick(40)
| {
"content_hash": "c155d345f7771ad6a12d69a63e78eb19",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 140,
"avg_line_length": 31.792,
"alnum_prop": 0.5931051836940111,
"repo_name": "renebentes/Python4Zumbis",
"id": "a770c99ad8416cc7d9b622ac8762092a964ff0f8",
"size": "3974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Materiais/Penultima_semana/py04-collisionDetection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "715972"
},
{
"name": "Smarty",
"bytes": "756"
}
],
"symlink_target": ""
} |
import unittest
# noinspection PyUnresolvedReferences
from requests.utils import DEFAULT_ACCEPT_ENCODING
from streamlink import Streamlink
from streamlink.stream import AkamaiHDStream
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
from streamlink.stream import HTTPStream
from streamlink.stream import RTMPStream
from streamlink.stream.stream import Stream
class TestStreamToJSON(unittest.TestCase):
def setUp(self):
self.session = Streamlink()
def test_base_stream(self):
stream = Stream(self.session)
self.assertEqual(
{"type": "stream"},
stream.__json__()
)
def test_http_stream(self):
url = "http://test.se/stream"
stream = HTTPStream(self.session, url, headers={"User-Agent": "Test"})
self.assertEqual(
{"type": "http",
"url": url,
"method": "GET",
"body": None,
"headers": {
"User-Agent": "Test",
"Accept": "*/*",
"Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
"Connection": "keep-alive",
}},
stream.__json__()
)
def test_hls_stream(self):
url = "http://test.se/stream.m3u8"
master = "http://test.se/master.m3u8"
stream = HLSStream(self.session, url, headers={"User-Agent": "Test"})
self.assertEqual(
{
"type": "hls",
"url": url,
"headers": {
"User-Agent": "Test",
"Accept": "*/*",
"Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
"Connection": "keep-alive",
}
},
stream.__json__()
)
stream = HLSStream(self.session, url, master, headers={"User-Agent": "Test"})
self.assertEqual(
{
"type": "hls",
"url": url,
"headers": {
"User-Agent": "Test",
"Accept": "*/*",
"Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
"Connection": "keep-alive",
},
"master": master
},
stream.__json__()
)
def test_hds_stream(self):
stream = HDSStream(self.session, "http://test.se/", "http://test.se/stream.f4m",
"http://test.se/stream/1.bootstrap", headers={"User-Agent": "Test"})
self.assertEqual(
{"type": "hds",
"baseurl": "http://test.se/",
"bootstrap": "http://test.se/stream/1.bootstrap",
"url": "http://test.se/stream.f4m",
"metadata": None,
"headers": {"User-Agent": "Test"},
"params": {}},
stream.__json__()
)
def test_akamai_stream(self):
stream = AkamaiHDStream(self.session, "http://akamai.test.se/stream")
self.assertEqual(
{'swf': None,
'type': 'akamaihd',
'url': 'http://akamai.test.se/stream'},
stream.__json__()
)
def test_rtmp_stream(self):
stream = RTMPStream(self.session, {"rtmp": "rtmp://test.se/app/play_path",
"swfVfy": "http://test.se/player.swf",
"swfhash": "test",
"swfsize": 123456,
"playPath": "play_path"})
self.assertEqual(
{"type": "rtmp",
"args": [],
"params": {"rtmp": "rtmp://test.se/app/play_path",
"swfVfy": "http://test.se/player.swf",
"swfhash": "test",
"swfsize": 123456,
"playPath": "play_path"}},
stream.__json__()
)
| {
"content_hash": "0a25a4fb120609a5169fac68ccb7d378",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 95,
"avg_line_length": 34.10344827586207,
"alnum_prop": 0.4598078867542973,
"repo_name": "melmorabity/streamlink",
"id": "629be4cd299174888b10223732da29ce6b4b2587",
"size": "3956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_stream_json.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1537432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SlugsConfig(AppConfig):
name = 'yepes.contrib.slugs'
verbose_name = _('Slugs')
| {
"content_hash": "f74423a5398eed5116fac98ba840ef0c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 22.7,
"alnum_prop": 0.7400881057268722,
"repo_name": "samuelmaudo/yepes",
"id": "73440fc394d749d80eb3273765e7632b11c528d4",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yepes/contrib/slugs/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1485"
},
{
"name": "CSS",
"bytes": "2805"
},
{
"name": "HTML",
"bytes": "18543"
},
{
"name": "JavaScript",
"bytes": "56039"
},
{
"name": "Python",
"bytes": "2415982"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.