"""ActiveMaster definition."""
from config_bootstrap import Master
class PDFiumTryserver(Master.Master4a):
project_name = 'PDFiumTryserver'
master_port = 21405
slave_port = 31405
master_port_alt = 26405
buildbot_url = 'https://build.chromium.org/p/tryserver.client.pdfium/'
buildbucket_bucket = 'master.tryserver.client.pdfium'
service_account_file = 'service-account-chromium-tryserver.json'
import codecs
import inspect
import os
import shutil
import stat
import sys
import textwrap
import tempfile
import unittest
import argparse
from io import StringIO
from test import support
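# Marker subclass of StringIO: stderr_to_parser_error() checks
# isinstance(sys.stdout/sys.stderr, StdIOBuffer) to detect that redirection
# is already in place and avoid nesting it.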
class StdIOBuffer(StringIO):
pass
class TestCase(unittest.TestCase):
def assertEqual(self, obj1, obj2):
if obj1 != obj2:
print('')
print(repr(obj1))
print(repr(obj2))
print(obj1)
print(obj2)
super(TestCase, self).assertEqual(obj1, obj2)
def setUp(self):
# The tests assume that line wrapping occurs at 80 columns, but this
# behaviour can be overridden by setting the COLUMNS environment
# variable. To ensure that this assumption is true, unset COLUMNS.
env = support.EnvironmentVarGuard()
env.unset("COLUMNS")
self.addCleanup(env.__exit__)
class TempDirMixin(object):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.old_dir = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_dir)
shutil.rmtree(self.temp_dir, True)
def create_readonly_file(self, filename):
file_path = os.path.join(self.temp_dir, filename)
with open(file_path, 'w') as file:
file.write(filename)
os.chmod(file_path, stat.S_IREAD)
class Sig(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
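# A Sig just records positional and keyword arguments so they can be replayed
# later as parser.add_argument(*sig.args, **sig.kwargs).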
class NS(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
sorted_items = sorted(self.__dict__.items())
kwarg_str = ', '.join(['%s=%r' % tup for tup in sorted_items])
return '%s(%s)' % (type(self).__name__, kwarg_str)
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
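# NS is a plain attribute bag compared via vars(), so keyword order does not
# matter: NS(x='a', y=None) == NS(y=None, x='a'). Expected parse results in
# the tests below are written as NS(...) instances.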
class ArgumentParserError(Exception):
def __init__(self, message, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
# if this is being called recursively and stderr or stdout is already being
# redirected, simply call the function and let the enclosing function
# catch the exception
if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
return parse_args(*args, **kwargs)
# if this is not being called recursively, redirect stderr and
# use it as the ArgumentParserError message
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StdIOBuffer()
sys.stderr = StdIOBuffer()
try:
try:
result = parse_args(*args, **kwargs)
for key in list(vars(result)):
if getattr(result, key) is sys.stdout:
setattr(result, key, old_stdout)
if getattr(result, key) is sys.stderr:
setattr(result, key, old_stderr)
return result
except SystemExit:
code = sys.exc_info()[1].code
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
raise ArgumentParserError("SystemExit", stdout, stderr, code)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
parse_args = super(ErrorRaisingArgumentParser, self).parse_args
return stderr_to_parser_error(parse_args, *args, **kwargs)
def exit(self, *args, **kwargs):
exit = super(ErrorRaisingArgumentParser, self).exit
return stderr_to_parser_error(exit, *args, **kwargs)
def error(self, *args, **kwargs):
error = super(ErrorRaisingArgumentParser, self).error
return stderr_to_parser_error(error, *args, **kwargs)
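# A minimal usage sketch (illustrative only, not part of the test suite):
# parse errors surface as ArgumentParserError instead of SystemExit, with the
# captured stdout/stderr and exit code attached.
#
#   parser = ErrorRaisingArgumentParser(prog='PROG')
#   parser.add_argument('-x')
#   try:
#       parser.parse_args(['-x'])        # missing value for -x
#   except ArgumentParserError as err:
#       print(err.error_code)            # usually 2 for usage errors
#       print(err.stderr)                # captured usage/error message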
class ParserTesterMetaclass(type):
"""Adds parser tests using the class attributes.
Classes of this type should specify the following attributes:
argument_signatures -- a list of Sig objects which specify
the signatures of Argument objects to be created
failures -- a list of args lists that should cause the parser
to fail
successes -- a list of (initial_args, options, remaining_args) tuples
where initial_args specifies the string args to be parsed,
options is a dict that should match the vars() of the options
parsed out of initial_args, and remaining_args should be any
remaining unparsed arguments
"""
def __init__(cls, name, bases, bodydict):
if name == 'ParserTestCase':
return
# default parser signature is empty
if not hasattr(cls, 'parser_signature'):
cls.parser_signature = Sig()
if not hasattr(cls, 'parser_class'):
cls.parser_class = ErrorRaisingArgumentParser
# ---------------------------------------
# functions for adding optional arguments
# ---------------------------------------
def no_groups(parser, argument_signatures):
"""Add all arguments directly to the parser"""
for sig in argument_signatures:
parser.add_argument(*sig.args, **sig.kwargs)
def one_group(parser, argument_signatures):
"""Add all arguments under a single group in the parser"""
group = parser.add_argument_group('foo')
for sig in argument_signatures:
group.add_argument(*sig.args, **sig.kwargs)
def many_groups(parser, argument_signatures):
"""Add each argument in its own group to the parser"""
for i, sig in enumerate(argument_signatures):
group = parser.add_argument_group('foo:%i' % i)
group.add_argument(*sig.args, **sig.kwargs)
# --------------------------
# functions for parsing args
# --------------------------
def listargs(parser, args):
"""Parse the args by passing in a list"""
return parser.parse_args(args)
def sysargs(parser, args):
"""Parse the args by defaulting to sys.argv"""
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
try:
return parser.parse_args()
finally:
sys.argv = old_sys_argv
# class that holds the combination of one optional argument
# addition method and one arg parsing method
class AddTests(object):
def __init__(self, tester_cls, add_arguments, parse_args):
self._add_arguments = add_arguments
self._parse_args = parse_args
add_arguments_name = self._add_arguments.__name__
parse_args_name = self._parse_args.__name__
for test_func in [self.test_failures, self.test_successes]:
func_name = test_func.__name__
names = func_name, add_arguments_name, parse_args_name
test_name = '_'.join(names)
def wrapper(self, test_func=test_func):
test_func(self)
try:
wrapper.__name__ = test_name
except TypeError:
pass
setattr(tester_cls, test_name, wrapper)
def _get_parser(self, tester):
args = tester.parser_signature.args
kwargs = tester.parser_signature.kwargs
parser = tester.parser_class(*args, **kwargs)
self._add_arguments(parser, tester.argument_signatures)
return parser
def test_failures(self, tester):
parser = self._get_parser(tester)
for args_str in tester.failures:
args = args_str.split()
raises = tester.assertRaises
raises(ArgumentParserError, parser.parse_args, args)
def test_successes(self, tester):
parser = self._get_parser(tester)
for args, expected_ns in tester.successes:
if isinstance(args, str):
args = args.split()
result_ns = self._parse_args(parser, args)
tester.assertEqual(expected_ns, result_ns)
# add tests for each combination of an optionals adding method
# and an arg parsing method
for add_arguments in [no_groups, one_group, many_groups]:
for parse_args in [listargs, sysargs]:
AddTests(cls, add_arguments, parse_args)
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
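# Each ParserTestCase subclass is purely declarative: the metaclass expands
# its class attributes into a grid of generated test methods,
# {failures, successes} x {no_groups, one_group, many_groups} x
# {listargs, sysargs}. An illustrative sketch (hypothetical, mirroring the
# real classes below):
#
#   class TestExample(ParserTestCase):
#       argument_signatures = [Sig('-x')]          # arguments to add
#       failures = ['-x']                          # arg strings that must fail
#       successes = [('-x a', NS(x='a'))]          # (args, expected namespace)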
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [
Sig('-x', action='store_true'),
Sig('-yyy', action='store_const', const=42),
Sig('-z'),
]
failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
'-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
successes = [
('', NS(x=False, yyy=None, z=None)),
('-x', NS(x=True, yyy=None, z=None)),
('-za', NS(x=False, yyy=None, z='a')),
('-z a', NS(x=False, yyy=None, z='a')),
('-xza', NS(x=True, yyy=None, z='a')),
('-xz a', NS(x=True, yyy=None, z='a')),
('-x -za', NS(x=True, yyy=None, z='a')),
('-x -z a', NS(x=True, yyy=None, z='a')),
('-y', NS(x=False, yyy=42, z=None)),
('-yyy', NS(x=False, yyy=42, z=None)),
('-x -yyy -za', NS(x=True, yyy=42, z='a')),
('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
    failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz',
                '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying the 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying the 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
class TestOptionalsRequired(ParserTestCase):
"""Tests the an optional action that is required"""
argument_signatures = [
Sig('-x', type=int, required=True),
]
failures = ['a', '']
successes = [
('-x 1', NS(x=1)),
('-x42', NS(x=42)),
]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append', default=['X'])]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=['X'])),
('--baz a', NS(baz=['X', 'a'])),
('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception, default=['X']),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=['X'])),
('-b', NS(b=['X', Exception])),
('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar='a')),
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOptionalNone(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=42, bar='a')),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with unlimited"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a', 'b'], bar=[])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with one or more"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a', 'a b']
successes = [
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2Optional(ParserTestCase):
"""Test a Positional with 2 nargs followed by one optional"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a', 'a b c d']
successes = [
('a b', NS(foo=['a', 'b'], bar=None)),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
    failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar=['a'])),
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOptional1(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='*'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=[], baz=['b'])),
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, one or more nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='+'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a', 'b']
successes = [
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
"""Test three Positionals: no nargs, optional narg and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='?', default=0.625),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=0.625, baz=['b'])),
('a b c', NS(foo='a', bar='b', baz=['c'])),
]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
"""Test two optional nargs"""
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='?', default=42),
]
failures = ['--foo', 'a b c']
successes = [
('', NS(foo=None, bar=42)),
('a', NS(foo='a', bar=42)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
"""Test an Optional narg followed by unlimited nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
failures = ['--foo']
successes = [
('', NS(foo=None, bar=[])),
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
"""Test an Optional narg followed by one or more nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
failures = ['', '--foo']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsChoicesString(ParserTestCase):
"""Test a set of single-character choices"""
argument_signatures = [Sig('spam', choices=set('abcdefg'))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('a', NS(spam='a')),
('g', NS(spam='g')),
]
class TestPositionalsChoicesInt(ParserTestCase):
"""Test a set of integer choices"""
argument_signatures = [Sig('spam', type=int, choices=range(20))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('4', NS(spam=4)),
('15', NS(spam=15)),
]
class TestPositionalsActionAppend(ParserTestCase):
"""Test the 'append' action"""
argument_signatures = [
Sig('spam', action='append'),
Sig('spam', action='append', nargs=2),
]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(spam=['a', ['b', 'c']])),
]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
"""Tests negative number args when numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-4', dest='y', action='store_true'),
]
failures = ['-2', '-315']
successes = [
('', NS(x=None, y=False)),
('a', NS(x='a', y=False)),
('-4', NS(x=None, y=True)),
('-4 a', NS(x='a', y=True)),
]
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
"""Tests negative number args when almost numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-k4', dest='y', action='store_true'),
]
failures = ['-k3']
successes = [
('', NS(x=None, y=False)),
('-2', NS(x='-2', y=False)),
('a', NS(x='a', y=False)),
('-k4', NS(x=None, y=True)),
('-k4 a', NS(x='a', y=True)),
]
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
argument_signatures = [
Sig('x', nargs='?'),
Sig('-y', '--yyy', dest='y'),
]
failures = ['-y']
successes = [
([''], NS(x='', y=None)),
(['a badger'], NS(x='a badger', y=None)),
(['-a badger'], NS(x='-a badger', y=None)),
(['-y', ''], NS(x=None, y='')),
(['-y', 'a badger'], NS(x=None, y='a badger')),
(['-y', '-a badger'], NS(x=None, y='-a badger')),
(['--yyy=a badger'], NS(x=None, y='a badger')),
(['--yyy=-a badger'], NS(x=None, y='-a badger')),
]
class TestPrefixCharacterOnlyArguments(ParserTestCase):
parser_signature = Sig(prefix_chars='-+')
argument_signatures = [
Sig('-', dest='x', nargs='?', const='badger'),
Sig('+', dest='y', type=int, default=42),
Sig('-+-', dest='z', action='store_true'),
]
failures = ['-y', '+ -']
successes = [
('', NS(x=None, y=42, z=False)),
('-', NS(x='badger', y=42, z=False)),
('- X', NS(x='X', y=42, z=False)),
('+ -3', NS(x=None, y=-3, z=False)),
('-+-', NS(x=None, y=42, z=True)),
('- ===', NS(x='===', y=42, z=False)),
]
class TestNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
failures = []
successes = [
('', NS(x=None, y=[])),
('-x', NS(x=[], y=[])),
('-x a', NS(x=['a'], y=[])),
('-x a -- b', NS(x=['a'], y=['b'])),
('a', NS(x=None, y=['a'])),
('a -x', NS(x=[], y=['a'])),
('a -x b', NS(x=['b'], y=['a'])),
]
class TestNargsRemainder(ParserTestCase):
"""Tests specifying a positional with nargs=REMAINDER"""
argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
failures = ['', '-z', '-z Z']
successes = [
('X', NS(x='X', y=[], z=None)),
('-z Z X', NS(x='X', y=[], z='Z')),
('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
]
class TestOptionLike(ParserTestCase):
"""Tests options that may or may not be arguments"""
argument_signatures = [
Sig('-x', type=float),
Sig('-3', type=float, dest='y'),
Sig('z', nargs='*'),
]
failures = ['-x', '-y2.5', '-xa', '-x -a',
'-x -3', '-x -3.5', '-3 -3.5',
'-x -2.5', '-x -2.5 a', '-3 -.5',
'a x -1', '-x -1 a', '-3 -1 a']
successes = [
('', NS(x=None, y=None, z=[])),
('-x 2.5', NS(x=2.5, y=None, z=[])),
('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
('-3.5', NS(x=None, y=0.5, z=[])),
('-3-.5', NS(x=None, y=-0.5, z=[])),
('-3 .5', NS(x=None, y=0.5, z=[])),
('a -3.5', NS(x=None, y=0.5, z=['a'])),
('a', NS(x=None, y=None, z=['a'])),
('a -x 1', NS(x=1.0, y=None, z=['a'])),
('-x 1 a', NS(x=1.0, y=None, z=['a'])),
('-3 1 a', NS(x=None, y=1.0, z=['a'])),
]
class TestDefaultSuppress(ParserTestCase):
"""Test actions with suppressed defaults"""
argument_signatures = [
Sig('foo', nargs='?', default=argparse.SUPPRESS),
Sig('bar', nargs='*', default=argparse.SUPPRESS),
Sig('--baz', action='store_true', default=argparse.SUPPRESS),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefaultSuppress(ParserTestCase):
"""Test actions with a parser-level default of SUPPRESS"""
parser_signature = Sig(argument_default=argparse.SUPPRESS)
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefault42(ParserTestCase):
"""Test actions with a parser-level default of 42"""
parser_signature = Sig(argument_default=42, version='1.0')
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS(foo=42, bar=42, baz=42)),
('a', NS(foo='a', bar=42, baz=42)),
('a b', NS(foo='a', bar=['b'], baz=42)),
('--baz', NS(foo=42, bar=42, baz=True)),
('a --baz', NS(foo='a', bar=42, baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFile, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
('recursive', '-a\n'
'A\n'
'@hello'),
('invalid', '@no-such-path\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('-a'),
Sig('x'),
Sig('y', nargs='+'),
]
failures = ['', '-b', 'X', '@invalid', '@missing']
successes = [
('X Y', NS(a=None, x='X', y=['Y'])),
('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
('@hello X', NS(a=None, x='hello world!', y=['X'])),
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
]
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFileConverter, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
def convert_arg_line_to_args(self, arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser_class = FromFileConverterArgumentParser
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('y', nargs='+'),
]
failures = []
successes = [
('@hello X', NS(y=['hello', 'world!', 'X'])),
]
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
def test_r(self):
type = argparse.FileType('r')
self.assertEqual("FileType('r')", repr(type))
def test_wb_1(self):
type = argparse.FileType('wb', 1)
self.assertEqual("FileType('wb', 1)", repr(type))
class RFile(object):
seen = {}
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
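# RFile compares equal to an open file object whose name and contents both
# match self.name; the class-level `seen` dict memoizes reads so each file
# object is read (and closed) only once across comparisons.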
class TestFileTypeR(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeR, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType()),
Sig('spam', type=argparse.FileType('r')),
]
failures = ['-x', '', 'non-existent-file.txt']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
('readonly', NS(x=None, spam=RFile('readonly'))),
]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeRB, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
argument_signatures = [
Sig('-x', type=argparse.FileType('rb')),
Sig('spam', type=argparse.FileType('rb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
]
class WFile(object):
seen = set()
def __init__(self, name):
self.name = name
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
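# WFile compares equal to an open file object with a matching name, after
# first probing that the file is really writable by writing a test string
# (encoded to bytes for 'b' modes) and closing it.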
class TestFileTypeW(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing files"""
def setUp(self):
super(TestFileTypeW, self).setUp()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType('w')),
Sig('spam', type=argparse.FileType('w')),
]
failures = ['-x', '', 'readonly']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestFileTypeWB(TempDirMixin, ParserTestCase):
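    """Test the FileType option/argument type for writing binary files"""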
argument_signatures = [
Sig('-x', type=argparse.FileType('wb')),
Sig('spam', type=argparse.FileType('wb')),
]
failures = ['-x', '']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestTypeCallable(ParserTestCase):
"""Test some callables as option/argument types"""
argument_signatures = [
Sig('--eggs', type=complex),
Sig('spam', type=float),
]
failures = ['a', '42j', '--eggs a', '--eggs 2i']
successes = [
('--eggs=42 42', NS(eggs=42, spam=42.0)),
('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
('1024.675', NS(eggs=None, spam=1024.675)),
]
class TestTypeUserDefined(ParserTestCase):
"""Test a user-defined option/argument type"""
class MyType(TestCase):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=MyType),
Sig('spam', type=MyType),
]
failures = []
successes = [
('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
]
class TestTypeClassicClass(ParserTestCase):
"""Test a classic class type"""
class C:
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=C),
Sig('spam', type=C),
]
failures = []
successes = [
('a -x b', NS(x=C('b'), spam=C('a'))),
('-xf g', NS(x=C('f'), spam=C('g'))),
]
class TestTypeRegistration(TestCase):
"""Test a user-defined type by registering it"""
def test(self):
def get_my_type(string):
return 'my_type{%s}' % string
parser = argparse.ArgumentParser()
parser.register('type', 'my_type', get_my_type)
parser.add_argument('-x', type='my_type')
parser.add_argument('y', type='my_type')
self.assertEqual(parser.parse_args('1'.split()),
NS(x=None, y='my_type{1}'))
self.assertEqual(parser.parse_args('-x 1 42'.split()),
NS(x='my_type{1}', y='my_type{42}'))
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
"""Test a user-defined option/argument action"""
class OptionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
# check destination and option string
assert self.dest == 'spam', 'dest: %s' % self.dest
assert option_string == '-s', 'flag: %s' % option_string
# when option is before argument, badger=2, and when
# option is after argument, badger=<whatever was set>
expected_ns = NS(spam=0.25)
if value in [0.125, 0.625]:
expected_ns.badger = 2
elif value in [2.0]:
expected_ns.badger = 84
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('opt_action failed: %s' % e)
setattr(namespace, 'spam', value)
class PositionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
assert option_string is None, ('option_string: %s' %
option_string)
# check destination
assert self.dest == 'badger', 'dest: %s' % self.dest
# when argument is before option, spam=0.25, and when
# option is after argument, spam=<whatever was set>
expected_ns = NS(badger=2)
if value in [42, 84]:
expected_ns.spam = 0.25
elif value in [1]:
expected_ns.spam = 0.625
elif value in [2]:
expected_ns.spam = 0.125
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('arg_action failed: %s' % e)
setattr(namespace, 'badger', value)
argument_signatures = [
Sig('-s', dest='spam', action=OptionalAction,
type=float, default=0.25),
Sig('badger', action=PositionalAction,
type=int, nargs='?', default=2),
]
failures = []
successes = [
('-s0.125', NS(spam=0.125, badger=2)),
('42', NS(spam=0.25, badger=42)),
('-s 0.625 1', NS(spam=0.625, badger=1)),
('84 -s2', NS(spam=2.0, badger=84)),
]
class TestActionRegistration(TestCase):
"""Test a user-defined action supplied by registering it"""
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, 'foo[%s]' % values)
def test(self):
parser = argparse.ArgumentParser()
parser.register('action', 'my_action', self.MyAction)
parser.add_argument('badger', action='my_action')
self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
"""Test the add_subparsers method"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def _get_parser(self, subparser_help=False, prefix_chars=None,
aliases=False):
# create a parser with a subparsers argument
if prefix_chars:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description', prefix_chars=prefix_chars)
parser.add_argument(
prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
else:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'--foo', action='store_true', help='foo help')
parser.add_argument(
'bar', type=float, help='bar help')
# check that only one subparsers argument can be added
subparsers_kwargs = {}
if aliases:
subparsers_kwargs['metavar'] = 'COMMAND'
subparsers_kwargs['title'] = 'commands'
else:
subparsers_kwargs['help'] = 'command help'
subparsers = parser.add_subparsers(**subparsers_kwargs)
self.assertArgumentParserError(parser.add_subparsers)
# add first sub-parser
parser1_kwargs = dict(description='1 description')
if subparser_help:
parser1_kwargs['help'] = '1 help'
if aliases:
parser1_kwargs['aliases'] = ['1alias1', '1alias2']
parser1 = subparsers.add_parser('1', **parser1_kwargs)
parser1.add_argument('-w', type=int, help='w help')
parser1.add_argument('x', choices='abc', help='x help')
# add second sub-parser
parser2_kwargs = dict(description='2 description')
if subparser_help:
parser2_kwargs['help'] = '2 help'
parser2 = subparsers.add_parser('2', **parser2_kwargs)
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
# return the main parser
return parser
def setUp(self):
super().setUp()
self.parser = self._get_parser()
self.command_help_parser = self._get_parser(subparser_help=True)
def test_parse_args_failures(self):
# check some failure cases:
for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
'0.5 1 -y', '0.5 2 -w']:
args = args_str.split()
self.assertArgumentParserError(self.parser.parse_args, args)
def test_parse_args(self):
# check some non-failure cases:
self.assertEqual(
self.parser.parse_args('0.5 1 b -w 7'.split()),
NS(foo=False, bar=0.5, w=7, x='b'),
)
self.assertEqual(
self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
)
self.assertEqual(
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
def test_parse_known_args(self):
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), []),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
)
def test_dest(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--foo', action='store_true')
subparsers = parser.add_subparsers(dest='bar')
parser1 = subparsers.add_parser('1')
parser1.add_argument('baz')
self.assertEqual(NS(foo=False, bar='1', baz='2'),
parser.parse_args('1 2'.split()))
def test_help(self):
self.assertEqual(self.parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help
              {1,2}       command help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
def test_help_extra_prefix_chars(self):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [++foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [++foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help
              {1,2}       command help

            optional arguments:
              -h, --help  show this help message and exit
              ++foo       foo help
            '''))
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
'usage: PROG [+h] [++foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [+h] [++foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help
              {1,2}       command help

            optional arguments:
              +h, ++help  show this help message and exit
              ++foo       foo help
            '''))
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(self.command_help_parser.format_help(),
                         textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help
              {1,2}       command help
                1         1 help
                2         2 help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
def test_subparser_title_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG',
description='main description')
parser.add_argument('--foo', action='store_true', help='foo help')
parser.add_argument('bar', help='bar help')
subparsers = parser.add_subparsers(title='subcommands',
description='command help',
help='additional text')
parser1 = subparsers.add_parser('1')
parser2 = subparsers.add_parser('2')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...

            main description

            positional arguments:
              bar         bar help

            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help

            subcommands:
              command help

              {1,2}       additional text
            '''))
def _test_subparser_help(self, args_str, expected_help):
try:
self.parser.parse_args(args_str.split())
except ArgumentParserError:
err = sys.exc_info()[1]
if err.stdout != expected_help:
print(repr(expected_help))
print(repr(err.stdout))
self.assertEqual(err.stdout, expected_help)
def test_subparser1_help(self):
        self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
            usage: PROG bar 1 [-h] [-w W] {a,b,c}

            1 description

            positional arguments:
              {a,b,c}     x help

            optional arguments:
              -h, --help  show this help message and exit
              -w W        w help
            '''))
def test_subparser2_help(self):
        self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
            usage: PROG bar 2 [-h] [-y {1,2,3}] [z [z ...]]

            2 description

            positional arguments:
              z           z help

            optional arguments:
              -h, --help  show this help message and exit
              -y {1,2,3}  y help
            '''))
def test_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertEqual(
parser.parse_known_args('0.5 1alias1 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
self.assertEqual(
parser.parse_known_args('0.5 1alias2 b'.split()),
(NS(foo=False, bar=0.5, w=None, x='b'), []),
)
def test_error_alias_invocation(self):
parser = self._get_parser(aliases=True)
self.assertArgumentParserError(parser.parse_args,
'0.5 1alias3 b'.split())
def test_alias_help(self):
parser = self._get_parser(aliases=True, subparser_help=True)
self.maxDiff = None
self.assertEqual(parser.format_help(), textwrap.dedent("""\
usage: PROG [-h] [--foo] bar COMMAND ...
main description
positional arguments:
bar bar help
optional arguments:
-h, --help show this help message and exit
--foo foo help
commands:
COMMAND
1 (1alias1, 1alias2)
1 help
2 2 help
"""))
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
"""Tests that order of group positionals matches construction order"""
def test_nongroup_first(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('foo')
group = parser.add_argument_group('g')
group.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_group_first(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
group.add_argument('foo')
parser.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_interleaved_groups(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
parser.add_argument('foo')
group.add_argument('bar')
parser.add_argument('baz')
group = parser.add_argument_group('yyy')
group.add_argument('frell')
expected = NS(foo='1', bar='2', baz='3', frell='4')
result = parser.parse_args('1 2 3 4'.split())
self.assertEqual(expected, result)
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super().setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
    def test_single_grandparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
def test_parent_help(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
parser_help = parser.format_help()
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {} [-h] [-b B] [--d D] [--w W] [-y Y] a z
positional arguments:
a
z
optional arguments:
-h, --help show this help message and exit
-b B
--w W
c:
--d D
x:
-y Y
'''.format(self.main_program)))
def test_groups_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
g = parent.add_argument_group(title='g', description='gd')
g.add_argument('-w')
g.add_argument('-x')
m = parent.add_mutually_exclusive_group()
m.add_argument('-y')
m.add_argument('-z')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertRaises(ArgumentParserError, parser.parse_args,
['-y', 'Y', '-z', 'Z'])
parser_help = parser.format_help()
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {} [-h] [-w W] [-x X] [-y Y | -z Z]
optional arguments:
-h, --help show this help message and exit
-y Y
-z Z
g:
gd
-w W
-x X
'''.format(self.main_program)))
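# Minimal sketch of the parents= mechanism exercised above (editor's
# addition, names hypothetical): arguments from each parent parser are
# copied into the child, so parents are usually built with add_help=False
# to avoid a duplicate -h/--help option in the child.
def _demo_parent_parsers():
    import argparse
    base = argparse.ArgumentParser(add_help=False)
    base.add_argument('--verbose', action='store_true')
    child = argparse.ArgumentParser(prog='demo', parents=[base])
    child.add_argument('path')
    return child.parse_args(['--verbose', 'x'])
    # -> Namespace(path='x', verbose=True)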
# ==============================
# Mutually exclusive group tests
# ==============================
class TestMutuallyExclusiveGroupErrors(TestCase):
def test_invalid_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
raises = self.assertRaises
raises(TypeError, parser.add_mutually_exclusive_group, title='foo')
def test_invalid_add_argument(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_mutually_exclusive_group()
add_argument = group.add_argument
raises = self.assertRaises
raises(ValueError, add_argument, '--foo', required=True)
raises(ValueError, add_argument, 'bar')
raises(ValueError, add_argument, 'bar', nargs='+')
raises(ValueError, add_argument, 'bar', nargs=1)
raises(ValueError, add_argument, 'bar', nargs=argparse.PARSER)
def test_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--foo', action='store_true')
group1.add_argument('--bar', action='store_false')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--soup', action='store_true')
group2.add_argument('--nuts', action='store_false')
expected = '''\
usage: PROG [-h] [--foo | --bar] [--soup | --nuts]
optional arguments:
-h, --help show this help message and exit
--foo
--bar
--soup
--nuts
'''
self.assertEqual(parser.format_help(), textwrap.dedent(expected))
class MEMixin(object):
def test_failures_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
error = ArgumentParserError
for args_string in self.failures:
self.assertRaises(error, parse_args, args_string.split())
def test_failures_when_required(self):
parse_args = self.get_parser(required=True).parse_args
error = ArgumentParserError
for args_string in self.failures + ['']:
self.assertRaises(error, parse_args, args_string.split())
def test_successes_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
successes = self.successes + self.successes_when_not_required
for args_string, expected_ns in successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_successes_when_required(self):
parse_args = self.get_parser(required=True).parse_args
for args_string, expected_ns in self.successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_usage_when_not_required(self):
format_usage = self.get_parser(required=False).format_usage
expected_usage = self.usage_when_not_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_usage_when_required(self):
format_usage = self.get_parser(required=True).format_usage
expected_usage = self.usage_when_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_help_when_not_required(self):
format_help = self.get_parser(required=False).format_help
help = self.usage_when_not_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
def test_help_when_required(self):
format_help = self.get_parser(required=True).format_help
help = self.usage_when_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--bar', help='bar help')
group.add_argument('--baz', nargs='?', const='Z', help='baz help')
return parser
failures = ['--bar X --baz Y', '--bar X --baz']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--bar X --bar Z', NS(bar='Z', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
('--baz', NS(bar=None, baz='Z')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz [BAZ]]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz [BAZ])
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--bar BAR bar help
--baz [BAZ] baz help
'''
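# Editor's sketch of the behavior TestMutuallyExclusiveSimple pins down:
# at most one member of a mutually exclusive group may be supplied, and
# required=True additionally demands that one of them is present.
def _demo_mutually_exclusive():
    import argparse
    parser = argparse.ArgumentParser(prog='demo')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--bar')
    group.add_argument('--baz')
    return parser.parse_args(['--bar', 'X'])  # Namespace(bar='X', baz=None)
    # parser.parse_args(['--bar', 'X', '--baz', 'Y']) would exit with
    # "argument --baz: not allowed with argument --bar"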
class TestMutuallyExclusiveLong(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('--abcde', help='abcde help')
parser.add_argument('--fghij', help='fghij help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--klmno', help='klmno help')
group.add_argument('--pqrst', help='pqrst help')
return parser
failures = ['--klmno X --pqrst Y']
successes = [
('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
('--abcde Y --klmno X',
NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
('--pqrst X --fghij Y',
NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
]
successes_when_not_required = [
('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
[--klmno KLMNO | --pqrst PQRST]
'''
usage_when_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
(--klmno KLMNO | --pqrst PQRST)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--abcde ABCDE abcde help
--fghij FGHIJ fghij help
--klmno KLMNO klmno help
--pqrst PQRST pqrst help
'''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-x', help=argparse.SUPPRESS)
group.add_argument('-y', action='store_false', help='y help')
return parser
failures = ['-x X -y']
successes = [
('-x X', NS(x='X', y=True)),
('-x X -x Y', NS(x='Y', y=True)),
('-y', NS(x=None, y=False)),
]
successes_when_not_required = [
('', NS(x=None, y=True)),
]
usage_when_not_required = '''\
usage: PROG [-h] [-y]
'''
usage_when_required = '''\
usage: PROG [-h] -y
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-y y help
'''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
add = group.add_argument
add('--spam', action='store_true', help=argparse.SUPPRESS)
add('--badger', action='store_false', help=argparse.SUPPRESS)
add('--bladder', help=argparse.SUPPRESS)
return parser
failures = [
'--spam --badger',
'--badger --bladder B',
'--bladder B --spam',
]
successes = [
('--spam', NS(spam=True, badger=True, bladder=None)),
('--badger', NS(spam=False, badger=False, bladder=None)),
('--bladder B', NS(spam=False, badger=True, bladder='B')),
('--spam --spam', NS(spam=True, badger=True, bladder=None)),
]
successes_when_not_required = [
('', NS(spam=False, badger=True, bladder=None)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
'''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
return parser
failures = [
'--foo --spam S',
'--spam S X',
'X --foo',
'X Y Z --spam S',
'--foo X Y',
]
successes = [
('--foo', NS(foo=True, spam=None, badger='X')),
('--spam S', NS(foo=False, spam='S', badger='X')),
('X', NS(foo=False, spam=None, badger=['X'])),
('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
]
successes_when_not_required = [
('', NS(foo=False, spam=None, badger='X')),
]
usage_when_not_required = '''\
usage: PROG [-h] [--foo | --spam SPAM | badger [badger ...]]
'''
usage_when_required = '''\
usage: PROG [-h] (--foo | --spam SPAM | badger [badger ...])
'''
help = '''\
positional arguments:
badger BADGER
optional arguments:
-h, --help show this help message and exit
--foo FOO
--spam SPAM SPAM
'''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-x', action='store_true', help='x help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a', action='store_true', help='a help')
group.add_argument('-b', action='store_true', help='b help')
parser.add_argument('-y', action='store_true', help='y help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
successes = [
('-a', NS(a=True, b=False, c=False, x=False, y=False)),
('-b', NS(a=False, b=True, c=False, x=False, y=False)),
('-c', NS(a=False, b=False, c=True, x=False, y=False)),
('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
]
successes_when_not_required = [
('', NS(a=False, b=False, c=False, x=False, y=False)),
('-x', NS(a=False, b=False, c=False, x=True, y=False)),
('-y', NS(a=False, b=False, c=False, x=False, y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-x x help
-a a help
-b b help
-y y help
-c c help
'''
class TestMutuallyExclusiveInGroup(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
titled_group = parser.add_argument_group(
title='Titled group', description='Group description')
mutex_group = \
titled_group.add_mutually_exclusive_group(required=required)
mutex_group.add_argument('--bar', help='bar help')
mutex_group.add_argument('--baz', help='baz help')
return parser
failures = ['--bar X --baz Y', '--baz X --bar Y']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz BAZ]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz BAZ)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
Titled group:
Group description
--bar BAR bar help
--baz BAZ baz help
'''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('x', help='x help')
parser.add_argument('-y', action='store_true', help='y help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('a', nargs='?', help='a help')
group.add_argument('-b', action='store_true', help='b help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['X A -b', '-b -c', '-c X A']
successes = [
('X A', NS(a='A', b=False, c=False, x='X', y=False)),
('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
]
successes_when_not_required = [
('X', NS(a=None, b=False, c=False, x='X', y=False)),
('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-y] [-b] [-c] x [a]
'''
help = '''\
positional arguments:
x x help
a a help
optional arguments:
-h, --help show this help message and exit
-y y help
-b b help
-c c help
'''
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
def get_parser(self, required=None):
parent = super(MEPBase, self).get_parser(required=required)
parser = ErrorRaisingArgumentParser(
prog=parent.prog, add_help=False, parents=[parent])
return parser
class TestMutuallyExclusiveGroupErrorsParent(
MEPBase, TestMutuallyExclusiveGroupErrors):
pass
class TestMutuallyExclusiveSimpleParent(
MEPBase, TestMutuallyExclusiveSimple):
pass
class TestMutuallyExclusiveLongParent(
MEPBase, TestMutuallyExclusiveLong):
pass
class TestMutuallyExclusiveFirstSuppressedParent(
MEPBase, TestMutuallyExclusiveFirstSuppressed):
pass
class TestMutuallyExclusiveManySuppressedParent(
MEPBase, TestMutuallyExclusiveManySuppressed):
pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
MEPBase, TestMutuallyExclusiveOptionalAndPositional):
pass
class TestMutuallyExclusiveOptionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsMixed):
pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
def test_set_defaults_no_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
parser.set_defaults(y='bar', z=1)
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([]))
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar', z=1),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='baz', y='bar', z=2),
parser.parse_args([], NS(x='baz', z=2)))
def test_set_defaults_with_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo', y='bar')
parser.add_argument('-x', default='xfoox')
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([]))
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar'),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS(x='baz')))
def test_set_defaults_subparsers(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser('a')
parser_a.set_defaults(y='bar')
self.assertEqual(NS(x='foo', y='bar'),
parser.parse_args('a'.split()))
def test_set_defaults_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
parent.set_defaults(x='foo')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertEqual(NS(x='foo'), parser.parse_args([]))
def test_set_defaults_same_as_add_argument(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
parser.add_argument('-w')
parser.add_argument('-x', default='XX')
parser.add_argument('y', nargs='?')
parser.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
def test_set_defaults_same_as_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
group = parser.add_argument_group('foo')
group.add_argument('-w')
group.add_argument('-x', default='XX')
group.add_argument('y', nargs='?')
group.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
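# Editor's sketch of the precedence rules the tests above establish:
# values from parse_args() beat attributes on a supplied namespace, which
# beat defaults; among defaults, whichever of add_argument(default=...)
# and set_defaults() was applied *last* wins.
def _demo_set_defaults():
    import argparse
    parser = argparse.ArgumentParser(prog='demo')
    parser.set_defaults(x='from set_defaults')
    parser.add_argument('-x', default='from add_argument')
    assert parser.parse_args([]).x == 'from add_argument'
    parser.set_defaults(x='reset later')  # applied last, so it wins
    assert parser.parse_args([]).x == 'reset later'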
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
def test_get_default(self):
parser = ErrorRaisingArgumentParser()
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--foo")
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--bar", type=int, default=42)
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
parser.set_defaults(foo="badger")
self.assertEqual("badger", parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
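# Companion sketch (editor's addition): get_default() reads back the same
# precedence chain as above without parsing anything, and returns None for
# unknown destinations.
def _demo_get_default():
    import argparse
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('--bar', type=int, default=42)
    assert parser.get_default('bar') == 42
    assert parser.get_default('missing') is None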
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
def test_empty(self):
ns = argparse.Namespace()
self.assertEqual('' in ns, False)
self.assertEqual('' not in ns, True)
self.assertEqual('x' in ns, False)
def test_non_empty(self):
ns = argparse.Namespace(x=1, y=2)
self.assertEqual('x' in ns, True)
self.assertEqual('x' not in ns, False)
self.assertEqual('y' in ns, True)
self.assertEqual('' in ns, False)
self.assertEqual('xx' in ns, False)
self.assertEqual('z' in ns, False)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
def __init__(cls, name, bases, bodydict):
if name == 'HelpTestCase':
return
class AddTests(object):
def __init__(self, test_class, func_suffix, std_name):
self.func_suffix = func_suffix
self.std_name = std_name
for test_func in [self.test_format,
self.test_print,
self.test_print_file]:
test_name = '%s_%s' % (test_func.__name__, func_suffix)
def test_wrapper(self, test_func=test_func):
test_func(self)
try:
test_wrapper.__name__ = test_name
except TypeError:
pass
setattr(test_class, test_name, test_wrapper)
def _get_parser(self, tester):
parser = argparse.ArgumentParser(
*tester.parser_signature.args,
**tester.parser_signature.kwargs)
for argument_sig in tester.argument_signatures:
parser.add_argument(*argument_sig.args,
**argument_sig.kwargs)
group_signatures = tester.argument_group_signatures
for group_sig, argument_sigs in group_signatures:
group = parser.add_argument_group(*group_sig.args,
**group_sig.kwargs)
for argument_sig in argument_sigs:
group.add_argument(*argument_sig.args,
**argument_sig.kwargs)
return parser
def _test(self, tester, parser_text):
expected_text = getattr(tester, self.func_suffix)
expected_text = textwrap.dedent(expected_text)
if expected_text != parser_text:
print(repr(expected_text))
print(repr(parser_text))
for char1, char2 in zip(expected_text, parser_text):
if char1 != char2:
print('first diff: %r %r' % (char1, char2))
break
tester.assertEqual(expected_text, parser_text)
def test_format(self, tester):
parser = self._get_parser(tester)
format = getattr(parser, 'format_%s' % self.func_suffix)
self._test(tester, format())
def test_print(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
old_stream = getattr(sys, self.std_name)
setattr(sys, self.std_name, StdIOBuffer())
try:
print_()
parser_text = getattr(sys, self.std_name).getvalue()
finally:
setattr(sys, self.std_name, old_stream)
self._test(tester, parser_text)
def test_print_file(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
sfile = StdIOBuffer()
print_(sfile)
parser_text = sfile.getvalue()
self._test(tester, parser_text)
# add tests for {format,print}_{usage,help,version}
for func_suffix, std_name in [('usage', 'stdout'),
('help', 'stdout'),
('version', 'stderr')]:
AddTests(cls, func_suffix, std_name)
bases = TestCase,
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
class TestHelpBiggerOptionals(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] foo bar
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = [
(Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
Sig('baz', help='BAZ HELP'),
Sig('-z', nargs='+', help='Z HELP')]),
]
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
GROUP TITLE:
GROUP DESCRIPTION
baz BAZ HELP
-z Z [Z ...] Z HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerPositionals(HelpTestCase):
"""Make sure that help aligns when arguments are longer"""
parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('ekiekiekifekang', help='EKI HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: USAGE
'''
help = usage + '''\
DESCRIPTION
positional arguments:
ekiekiekifekang EKI HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-x X HELP
--y Y Y HELP
'''
version = ''
class TestHelpReformatting(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(
prog='PROG',
description=' oddly formatted\n'
'description\n'
'\n'
'that is so long that it should go onto multiple '
'lines when wrapped')
argument_signatures = [
Sig('-x', metavar='XX', help='oddly\n'
' formatted -x help'),
Sig('y', metavar='yyy', help='normal y help'),
]
argument_group_signatures = [
(Sig('title', description='\n'
' oddly formatted group\n'
'\n'
'description'),
[Sig('-a', action='store_true',
help=' oddly \n'
'formatted -a help \n'
' again, so long that it should be wrapped over '
'multiple lines')]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
oddly formatted description that is so long that it should go onto \
multiple
lines when wrapped
positional arguments:
yyy normal y help
optional arguments:
-h, --help show this help message and exit
-x XX oddly formatted -x help
title:
oddly formatted group description
-a oddly formatted -a help again, so long that it should \
be wrapped
over multiple lines
'''
version = ''
class TestHelpWrappingShortNames(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
    parser_signature = Sig(prog='PROG', description='D\nD' * 30)
argument_signatures = [
Sig('-x', metavar='XX', help='XHH HX' * 20),
Sig('y', metavar='yyy', help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyy YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-x XX XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
ALPHAS:
-a AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
HHAAHHH HHAAHHH HHA
'''
version = ''
class TestHelpWrappingLongNames(HelpTestCase):
"""Make sure that text after long names starts on the next line"""
    parser_signature = Sig(usage='USAGE', description='D D' * 30,
                           version='V V' * 30)
argument_signatures = [
Sig('-x', metavar='X' * 25, help='XH XH' * 20),
Sig('y', metavar='y' * 25, help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', metavar='A' * 25, help='AH AH' * 20),
Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
]
usage = '''\
usage: USAGE
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyyyyyyyyyyyyyyyyyyyyyyyy
YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
ALPHAS:
-a AAAAAAAAAAAAAAAAAAAAAAAAA
AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
zzzzzzzzzzzzzzzzzzzzzzzzz
ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
'''
version = '''\
V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
VV VV VV VV V
'''
class TestHelpUsage(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', nargs='+', help='w'),
Sig('-x', nargs='*', help='x'),
Sig('a', help='a'),
Sig('b', help='b', nargs=2),
Sig('c', help='c', nargs='?'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-y', nargs='?', help='y'),
Sig('-z', nargs=3, help='z'),
Sig('d', help='d', nargs='*'),
Sig('e', help='e', nargs='+'),
])
]
usage = '''\
usage: PROG [-h] [-w W [W ...]] [-x [X [X ...]]] [-y [Y]] [-z Z Z Z]
a b b [c] [d [d ...]] e [e ...]
'''
help = usage + '''\
positional arguments:
a a
b b
c c
optional arguments:
-h, --help show this help message and exit
-w W [W ...] w
-x [X [X ...]] x
group:
-y [Y] y
-z Z Z Z z
d d
e e
'''
version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = [
(Sig('xxxx'), [
Sig('-x', help='x'),
Sig('a', help='a'),
]),
(Sig('yyyy'), [
Sig('b', help='b'),
Sig('-y', help='y'),
]),
]
usage = '''\
usage: PROG [-x X] [-y Y] a b
'''
help = usage + '''\
xxxx:
-x X x
a a
yyyy:
b b
-y Y y
'''
version = ''
class TestHelpUsageLongProg(HelpTestCase):
"""Test usage messages where the prog is long"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W'),
Sig('-x', metavar='X'),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w W] [-x X] a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w W
-x X
'''
version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the optionals wrap"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the positionals wrap"""
parser_signature = Sig(prog='P' * 60, add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
"""Test usage messages where the optionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
Sig('c'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b c
'''
help = usage + '''\
positional arguments:
a
b
c
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
"""Test usage messages where the positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x'),
Sig('-y'),
Sig('-z'),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x X] [-y Y] [-z Z]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x X
-y Y
-z Z
'''
version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
"""Test usage messages where the optionals and positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only optionals and they wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only positionals and they wrap"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpVariableExpansion(HelpTestCase):
"""Test that variables are expanded properly in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', type=int,
help='x %(prog)s %(default)s %(type)s %%'),
Sig('-y', action='store_const', default=42, const='XXX',
help='y %(prog)s %(default)s %(const)s'),
Sig('--foo', choices='abc',
help='foo %(prog)s %(default)s %(choices)s'),
Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
help='bar %(prog)s %(default)s %(dest)s'),
Sig('spam', help='spam %(prog)s %(default)s'),
Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-a', help='a %(prog)s %(default)s'),
Sig('-b', default=-1, help='b %(prog)s %(default)s'),
])
]
usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
optional arguments:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
"""Test that variables are expanded properly when usage= is present"""
parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG FOO
''')
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
'''
version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
"""Test that variables are expanded properly with no arguments"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG
''')
help = usage
version = ''
class TestHelpSuppressUsage(HelpTestCase):
"""Test that items can be suppressed in usage messages"""
parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
help = '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
usage = ''
version = ''
class TestHelpSuppressOptional(HelpTestCase):
"""Test that optional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help=argparse.SUPPRESS),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG spam
'''
help = usage + '''\
positional arguments:
spam spam help
'''
version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
"""Test that optional groups can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpSuppressPositional(HelpTestCase):
"""Test that positional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help=argparse.SUPPRESS),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpRequiredOptional(HelpTestCase):
"""Test that required options don't look optional"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', required=True, help='foo help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] --foo FOO
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
"""Test that options display with different prefix characters"""
parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
argument_signatures = [
Sig('^^foo', action='store_true', help='foo help'),
Sig(';b', ';;bar', help='bar help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
help = usage + '''\
optional arguments:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
version = ''
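# Editor's sketch: prefix_chars replaces '-' as the option marker, so a
# parser can accept, e.g., '+'-style options as tested above with '^' and
# ';'.  The option names below are hypothetical.  When '-' is absent from
# prefix_chars, the automatic help option uses the first prefix character.
def _demo_prefix_chars():
    import argparse
    parser = argparse.ArgumentParser(prog='demo', prefix_chars='+')
    parser.add_argument('+f', '++flag', action='store_true')
    return parser.parse_args(['++flag'])  # -> Namespace(flag=True)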
class TestHelpNoHelpOptional(HelpTestCase):
"""Test that the --help argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
--foo FOO foo help
'''
version = ''
class TestHelpVersionOptional(HelpTestCase):
"""Test that the --version argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', version='1.0')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
--foo FOO foo help
'''
version = '''\
1.0
'''
class TestHelpNone(HelpTestCase):
"""Test that no errors occur if no help is specified"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo'),
Sig('spam'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam
optional arguments:
-h, --help show this help message and exit
--foo FOO
'''
version = ''
class TestHelpTupleMetavar(HelpTestCase):
"""Test specifying metavar as a tuple"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
Sig('-z', help='z', nargs='?', metavar=('Z1', )),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
version = ''
class TestHelpRawText(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should also\n'
'appear as given here'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpRawDescription(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should not\n'
' retain this odd formatting'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpArgumentDefaults(HelpTestCase):
"""Test the ArgumentDefaultsHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='description')
argument_signatures = [
Sig('--foo', help='foo help - oh and by the way, %(default)s'),
Sig('--bar', action='store_true', help='bar help'),
Sig('spam', help='spam help'),
Sig('badger', nargs='?', default='wooden', help='badger help'),
]
argument_group_signatures = [
(Sig('title', description='description'),
[Sig('--baz', type=int, default=42, help='baz help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--baz BAZ] spam [badger]
'''
help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
title:
description
--baz BAZ baz help (default: 42)
'''
version = ''
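# Editor's sketch of the formatter under test: passing
# ArgumentDefaultsHelpFormatter as formatter_class appends
# "(default: ...)" to help strings that do not already mention the default.
def _demo_defaults_formatter():
    import argparse
    parser = argparse.ArgumentParser(
        prog='demo',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--level', type=int, default=3, help='verbosity')
    return parser.format_help()  # help line ends with "(default: 3)"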
class TestHelpVersionAction(HelpTestCase):
"""Test the default help for the version action"""
parser_signature = Sig(prog='PROG', description='description')
argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-V]
'''
help = usage + '''\
description
optional arguments:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
version = ''
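# Editor's sketch: action='version' is the non-deprecated replacement for
# the version= constructor argument exercised elsewhere in this file.
def _demo_version_action():
    import argparse
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s 1.0')
    return parser  # parser.parse_args(['-V']) prints "demo 1.0" and exits
    # (on this Python version the message goes to stderr, per the tests
    # in TestOptionalsHelpVersionActions below)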
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
"""Test a bunch of invalid Argument constructors"""
def assertTypeError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(TypeError, parser.add_argument,
*args, **kwargs)
def assertValueError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(ValueError, parser.add_argument,
*args, **kwargs)
def test_invalid_keyword_arguments(self):
self.assertTypeError('-x', bar=None)
self.assertTypeError('-y', callback='foo')
self.assertTypeError('-y', callback_args=())
self.assertTypeError('-y', callback_kwargs={})
def test_missing_destination(self):
self.assertTypeError()
for action in ['append', 'store']:
self.assertTypeError(action=action)
def test_invalid_option_strings(self):
self.assertValueError('--')
self.assertValueError('---')
def test_invalid_type(self):
self.assertValueError('--foo', type='int')
def test_invalid_action(self):
self.assertValueError('-x', action='foo')
self.assertValueError('foo', action='baz')
parser = argparse.ArgumentParser()
try:
parser.add_argument("--foo", action="store-true")
except ValueError:
e = sys.exc_info()[1]
expected = 'unknown action'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_multiple_dest(self):
parser = argparse.ArgumentParser()
parser.add_argument(dest='foo')
try:
parser.add_argument('bar', dest='baz')
except ValueError:
e = sys.exc_info()[1]
expected = 'dest supplied twice for positional argument'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_no_argument_actions(self):
for action in ['store_const', 'store_true', 'store_false',
'append_const', 'count']:
for attrs in [dict(type=int), dict(nargs='+'),
dict(choices='ab')]:
self.assertTypeError('-x', action=action, **attrs)
def test_no_argument_no_const_actions(self):
# options with zero arguments
for action in ['store_true', 'store_false', 'count']:
# const is always disallowed
self.assertTypeError('-x', const='foo', action=action)
# nargs is always disallowed
self.assertTypeError('-x', nargs='*', action=action)
def test_more_than_one_argument_actions(self):
for action in ['store', 'append']:
# nargs=0 is disallowed
self.assertValueError('-x', nargs=0, action=action)
self.assertValueError('spam', nargs=0, action=action)
# const is disallowed with non-optional arguments
for nargs in [1, '*', '+']:
self.assertValueError('-x', const='foo',
nargs=nargs, action=action)
self.assertValueError('spam', const='foo',
nargs=nargs, action=action)
def test_required_const_actions(self):
for action in ['store_const', 'append_const']:
# nargs is always disallowed
self.assertTypeError('-x', nargs='+', action=action)
def test_parsers_action_missing_params(self):
self.assertTypeError('command', action='parsers')
self.assertTypeError('command', action='parsers', prog='PROG')
self.assertTypeError('command', action='parsers',
parser_class=argparse.ArgumentParser)
def test_required_positional(self):
self.assertTypeError('foo', required=True)
def test_user_defined_action(self):
class Success(Exception):
pass
class Action(object):
def __init__(self,
option_strings,
dest,
const,
default,
required=False):
if dest == 'spam':
if const is Success:
if default is Success:
raise Success()
def __call__(self, *args, **kwargs):
pass
parser = argparse.ArgumentParser()
self.assertRaises(Success, parser.add_argument, '--spam',
action=Action, default=Success, const=Success)
self.assertRaises(Success, parser.add_argument, 'spam',
action=Action, default=Success, const=Success)
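# Editor's sketch of a *usable* custom action, in contrast to the
# constructor-probing Action above: subclass argparse.Action and do the
# work in __call__.  The upper-casing behavior chosen here is hypothetical.
class _UpperAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values.upper())
def _demo_user_action():
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('--name', action=_UpperAction)
    return parser.parse_args(['--name', 'spam'])  # -> Namespace(name='SPAM')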
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
def test_dest(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo')
self.assertEqual(action.dest, 'foo')
action = parser.add_argument('-b', '--bar')
self.assertEqual(action.dest, 'bar')
action = parser.add_argument('-x', '-y')
self.assertEqual(action.dest, 'x')
def test_misc(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo', nargs='?', const=42,
default=84, type=int, choices=[1, 2],
help='FOO', metavar='BAR', dest='baz')
self.assertEqual(action.nargs, '?')
self.assertEqual(action.const, 42)
self.assertEqual(action.default, 84)
self.assertEqual(action.type, int)
self.assertEqual(action.choices, [1, 2])
self.assertEqual(action.help, 'FOO')
self.assertEqual(action.metavar, 'BAR')
self.assertEqual(action.dest, 'baz')
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
def test_bad_type(self):
self.assertRaises(ValueError, argparse.ArgumentParser,
conflict_handler='foo')
def test_conflict_error(self):
parser = argparse.ArgumentParser()
parser.add_argument('-x')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '-x')
parser.add_argument('--spam')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '--spam')
def test_resolve_error(self):
get_parser = argparse.ArgumentParser
parser = get_parser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-x', help='OLD X')
parser.add_argument('-x', help='NEW X')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
'''))
parser.add_argument('--spam', metavar='OLD_SPAM')
parser.add_argument('--spam', metavar='NEW_SPAM')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
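# Editor's sketch of the 'resolve' handler verified above: instead of
# raising ArgumentError on a duplicate option string, the parser quietly
# detaches the option string from the older action.
def _demo_resolve_handler():
    import argparse
    parser = argparse.ArgumentParser(prog='demo',
                                     conflict_handler='resolve')
    parser.add_argument('-x', help='old meaning')
    parser.add_argument('-x', help='new meaning')  # no ArgumentError
    return parser.parse_args(['-x', '1'])  # handled by the newer action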
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
"""Test the help and version actions"""
def _get_error(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except ArgumentParserError:
return sys.exc_info()[1]
else:
self.assertRaises(ArgumentParserError, func, *args, **kwargs)
def assertPrintHelpExit(self, parser, args_str):
self.assertEqual(
parser.format_help(),
self._get_error(parser.parse_args, args_str.split()).stdout)
def assertPrintVersionExit(self, parser, args_str):
self.assertEqual(
parser.format_version(),
self._get_error(parser.parse_args, args_str.split()).stderr)
def assertArgumentParserError(self, parser, *args):
self.assertRaises(ArgumentParserError, parser.parse_args, args)
def test_version(self):
parser = ErrorRaisingArgumentParser(version='1.0')
self.assertPrintHelpExit(parser, '-h')
self.assertPrintHelpExit(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_format(self):
parser = ErrorRaisingArgumentParser(prog='PPP', version='%(prog)s 3.5')
msg = self._get_error(parser.parse_args, ['-v']).stderr
self.assertEqual('PPP 3.5\n', msg)
def test_version_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False, version='1.0')
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_action(self):
parser = ErrorRaisingArgumentParser(prog='XXX')
parser.add_argument('-V', action='version', version='%(prog)s 3.7')
msg = self._get_error(parser.parse_args, ['-V']).stderr
self.assertEqual('XXX 3.7\n', msg)
def test_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_alternate_help_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-x', action='help')
parser.add_argument('-y', action='version')
self.assertPrintHelpExit(parser, '-x')
self.assertPrintVersionExit(parser, '-y')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_help_version_extra_arguments(self):
parser = ErrorRaisingArgumentParser(version='1.0')
parser.add_argument('-x', action='store_true')
parser.add_argument('y')
# try all combinations of valid prefixes and suffixes
valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
for prefix in valid_prefixes:
for suffix in valid_suffixes:
format = '%s %%s %s' % (prefix, suffix)
self.assertPrintHelpExit(parser, format % '-h')
self.assertPrintHelpExit(parser, format % '--help')
self.assertPrintVersionExit(parser, format % '-v')
self.assertPrintVersionExit(parser, format % '--version')
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
"""Test str() and repr() on Optionals and Positionals"""
def assertStringEqual(self, obj, result_string):
for func in [str, repr]:
self.assertEqual(func(obj), result_string)
def test_optional(self):
option = argparse.Action(
option_strings=['--foo', '-a', '-b'],
dest='b',
type='int',
nargs='+',
default=42,
choices=[1, 2, 3],
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
"choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
argument = argparse.Action(
option_strings=[],
dest='x',
type=float,
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
"help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
ns = argparse.Namespace(foo=42, bar='spam')
string = "Namespace(bar='spam', foo=42)"
self.assertStringEqual(ns, string)
def test_parser(self):
parser = argparse.ArgumentParser(prog='PROG')
string = (
"ArgumentParser(prog='PROG', usage=None, description=None, "
"version=None, formatter_class=%r, conflict_handler='error', "
"add_help=True)" % argparse.HelpFormatter)
self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
def test_constructor(self):
ns = argparse.Namespace()
self.assertRaises(AttributeError, getattr, ns, 'x')
ns = argparse.Namespace(a=42, b='spam')
self.assertEqual(ns.a, 42)
self.assertEqual(ns.b, 'spam')
def test_equality(self):
ns1 = argparse.Namespace(a=1, b=2)
ns2 = argparse.Namespace(b=2, a=1)
ns3 = argparse.Namespace(a=1)
ns4 = argparse.Namespace(b=2)
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns1, ns3)
self.assertNotEqual(ns1, ns4)
self.assertNotEqual(ns2, ns3)
self.assertNotEqual(ns2, ns4)
self.assertTrue(ns1 != ns3)
self.assertTrue(ns1 != ns4)
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
def _test_module_encoding(self, path):
path, _ = os.path.splitext(path)
path += ".py"
with codecs.open(path, 'r', 'utf8') as f:
f.read()
def test_argparse_module_encoding(self):
self._test_module_encoding(argparse.__file__)
def test_test_argparse_module_encoding(self):
self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
def test_argument_error(self):
msg = "my error here"
error = argparse.ArgumentError(None, msg)
self.assertEqual(str(error), msg)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
def test_argument_type_error(self):
def spam(string):
raise argparse.ArgumentTypeError('spam!')
parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
parser.add_argument('x', type=spam)
try:
parser.parse_args(['XXX'])
except ArgumentParserError:
expected = 'usage: PROG x\nPROG: error: argument x: spam!\n'
msg = sys.exc_info()[1].stderr
self.assertEqual(expected, msg)
else:
self.fail()
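# Editor's sketch: a type= callable may raise ArgumentTypeError to produce
# a clean "error: argument x: <message>" line, as verified above, instead
# of a traceback.  The positivity rule used here is hypothetical.
def _positive_int(string):
    value = int(string)
    if value <= 0:
        raise argparse.ArgumentTypeError('%r is not positive' % string)
    return value
def _demo_argument_type_error():
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('count', type=_positive_int)
    return parser.parse_args(['3'])  # '0' would exit with "is not positive"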
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
args, extras = parser.parse_known_args('--foo F --bar --baz'.split())
self.assertEqual(NS(foo='F'), args)
self.assertEqual(['--bar', '--baz'], extras)
def test_mixed(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', nargs='?', const=1, type=int)
parser.add_argument('--spam', action='store_false')
parser.add_argument('badger')
argv = ["B", "C", "--foo", "-v", "3", "4"]
args, extras = parser.parse_known_args(argv)
self.assertEqual(NS(v=3, spam=True, badger="B"), args)
self.assertEqual(["C", "--foo", "4"], extras)
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
def test(self):
for name in argparse.__all__:
self.assertTrue(hasattr(argparse, name))
def test_all_exports_everything_but_modules(self):
items = [
name
for name, value in vars(argparse).items()
if not (name.startswith("_") or name == 'ngettext')
if not inspect.ismodule(value)
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
def test_main():
# silence warnings about version argument - these are expected
with support.check_warnings(
('The "version" argument to ArgumentParser is deprecated.',
DeprecationWarning),
('The (format|print)_version method is deprecated',
DeprecationWarning)):
support.run_unittest(__name__)
# Remove global references to avoid looking like we have refleaks.
RFile.seen = {}
WFile.seen = set()
if __name__ == '__main__':
test_main()
| {
"content_hash": "139eec5e7e65a33303fdc4055b44bbdb",
"timestamp": "",
"source": "github",
"line_count": 4429,
"max_line_length": 112,
"avg_line_length": 32.567848272747796,
"alnum_prop": 0.5293289795691992,
"repo_name": "MalloyPower/parsing-python",
"id": "03c95fade4125e27d49f68e232f878295a292b2b",
"size": "144300",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/test/test_argparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_screenshot_comment_item_url,
get_review_screenshot_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
fixtures = ['test_users']
def _create_screenshot_review_with_issue(self, publish=False,
comment_text=None):
"""Sets up a review for a screenshot that includes an open issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the response from posting the comment, the review object, and
the review request object.
"""
if not comment_text:
comment_text = 'Test screenshot comment with an opened issue'
review_request = self.create_review_request(publish=True,
submitter=self.user)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_screenshot_comment(review, screenshot,
comment_text,
issue_opened=True)
return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource list APIs."""
sample_api_url = 'review-requests/<id>/reviews/<id>/screenshot-comments/'
resource = resources.review_screenshot_comment
def setup_review_request_child_test(self, review_request):
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
return (get_review_screenshot_comment_list_url(review),
screenshot_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
if populate_items:
items = [self.create_screenshot_comment(review, screenshot)]
else:
items = []
return (get_review_screenshot_comment_list_url(review,
local_site_name),
screenshot_comment_list_mimetype,
items)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
return (
get_review_screenshot_comment_list_url(review, local_site_name),
screenshot_comment_item_mimetype,
{
'screenshot_id': screenshot.pk,
'text': 'Test comment',
'x': 2,
'y': 2,
'w': 10,
'h': 10,
},
[review, screenshot])
def check_post_result(self, user, rsp, review, screenshot):
comment = \
ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])
self.compare_item(rsp['screenshot_comment'], comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/screenshot-comments/ API
with an issue
"""
comment_text = "Test screenshot comment with an opened issue"
comment, review, review_request = \
self._create_screenshot_review_with_issue(
publish=False, comment_text=comment_text)
rsp = self.api_get(
get_review_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('screenshot_comments', rsp)
self.assertEqual(len(rsp['screenshot_comments']), 1)
self.assertEqual(rsp['screenshot_comments'][0]['text'], comment_text)
self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'
resource = resources.review_screenshot_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk),
screenshot_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.screenshot_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_screenshot_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
{'text': 'Test comment'},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = ScreenshotComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['screenshot_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
        # It should not be possible to change the issue_status while the
        # review is unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request so that it's not owned by
# self.user
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
# Change the owner of the review request and review so that they're not
# owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
def test_put_deleted_screenshot_comment_issue_status(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>
API with an issue and a deleted screenshot
"""
comment_text = "Test screenshot comment with an opened issue"
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True,
submitter=self.user,
target_people=[self.user])
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot,
comment_text, x, y, w, h,
issue_opened=True)
        # First, let's ensure that the user who created the comment cannot
        # alter the issue_status while the review is unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
# Next, let's publish the review, and try altering the issue_status.
# This should be allowed, since the review request was made by the
# current user.
review.public = True
review.save()
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
# Delete the screenshot.
self._delete_screenshot(review_request, screenshot)
review_request.publish(review_request.submitter)
# Try altering the issue_status. This should be allowed.
rsp = self.api_put(
rsp['screenshot_comment']['links']['self']['href'],
{'issue_status': 'open'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
| {
"content_hash": "8a39d82b08d8eb42b4ef125f4e14f4ec",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 79,
"avg_line_length": 41.2,
"alnum_prop": 0.6084142394822006,
"repo_name": "reviewboard/reviewboard",
"id": "009cedcaf3a143df5553829d3b4754b09e6bab4e",
"size": "16686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/tests/test_review_screenshot_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from ..client import MultiClient
from ..shell import MultiShell
class MultiSwift(MultiClient):
def __init__(self):
super(MultiSwift, self).__init__()
self.default_executable = 'swift'
self.prefix_list += ["swift_", "swiftclient_"]
def main_client():
multistack_shell = MultiShell(MultiSwift)
multistack_shell.run_client()
| {
"content_hash": "f8629a1ef3312048c8a259ac0c562256",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 23.705882352941178,
"alnum_prop": 0.6699751861042184,
"repo_name": "testeddoughnut/multistack",
"id": "e13ae039d8e42e30d85422c61dcb9613c5a98cbf",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multistack/clients/swift.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37138"
}
],
"symlink_target": ""
} |
"""
File: functional/test_email_validation.py
Creator: MazeFX
Date: 25-7-2016
First functional test, constructed by following the book
'Test Driven Development with Python' by Harry Percival.
Thanks Harry for giving me a guideline to becoming a good developer.
User description:
-----------------
Name: Dave
Occupation: Recruiter for a Django company
Goal: Recruit a Django Developer who is enthusiastic about programming
and has shown that he is prepared to obey the testing goat.
"""
from .base import FunctionalTest
import urllib.parse
class EmailValidationTest(FunctionalTest):
def test_cannot_input_invalid_values(self):
# Dave goes to the contact page and is having a very bad day
        # Last night he drank way too much, so now he is making all kinds
        # of mistakes when entering information.
url = urllib.parse.urljoin(self.server_url, '/contact/')
self.browser.get(url)
# He at least verifies that it is the correct page
self.assertIn('Verstuur Email', self.browser.title)
# Dave doesn't enter anything and submits right away
# (For testing all fields on empty value validation)
submit_button = self.browser.find_element_by_id('submit-id-submit')
with self.wait_for_page_load(timeout=10):
submit_button.click()
# He sees that all fields have error messages.
fullname_error = self.browser.find_element_by_id('error_1_id_fullname').text
self.assertEqual('Hey je moet wel invullen!', fullname_error)
email_error = self.browser.find_element_by_id('error_1_id_email').text
self.assertEqual('Hey je moet wel invullen!', email_error)
subject_error = self.browser.find_element_by_id('error_1_id_subject').text
self.assertEqual('Hey je moet wel invullen!', subject_error)
message_error = self.browser.find_element_by_id('error_1_id_message').text
self.assertEqual('Hey je moet wel invullen!', message_error)
# He then enters an invalid email address
email_input = self.browser.find_element_by_id('id_email')
email_input.send_keys('Not an email')
submit_button = self.browser.find_element_by_id('submit-id-submit')
with self.wait_for_page_load(timeout=10):
submit_button.click()
email_error = self.browser.find_element_by_id('error_1_id_email').text
self.assertEqual('Vul een geldig email adres in.', email_error)
# He tries again with almost an email address
email_input = self.browser.find_element_by_id('id_email')
email_input.clear()
email_input.send_keys('Almost@email')
submit_button = self.browser.find_element_by_id('submit-id-submit')
with self.wait_for_page_load(timeout=10):
submit_button.click()
email_error = self.browser.find_element_by_id('error_1_id_email').text
self.assertEqual('Vul een geldig email adres in.', email_error)
# Dave enters a correct email address and the email is being sent.
        # Although Dave failed to be a good user, the validation check kept
        # him from sending an incomplete or incorrect email.
# End of test
| {
"content_hash": "d73450904bcc09659201cdb92548e3a5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 84,
"avg_line_length": 39.25609756097561,
"alnum_prop": 0.6784715750232991,
"repo_name": "MazeFX/cookiecutter_website_project",
"id": "781b1dc8f52480a50232e5e101d28911d5022df2",
"size": "3244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functional_tests/test_email_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36015"
},
{
"name": "HTML",
"bytes": "107954"
},
{
"name": "JavaScript",
"bytes": "81231"
},
{
"name": "Python",
"bytes": "62159"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
} |
from couchdb import design
class CouchView(design.ViewDefinition):
def __init__(self):
params = {
'design': '_design/ctx',
'name': self.view_name,
'language': 'python',
}
if hasattr(self, 'map_fun'):
params['map_fun'] = self.map_fun
if hasattr(self, 'reduce_fun'):
params['reduce_fun'] = self.reduce_fun
super().__init__(**params)
@classmethod
    def uri(cls):
        return '_design/ctx/_view/{}'.format(cls.view_name)
class GetActiveTask(CouchView):
view_name = 'get_active_task'
@staticmethod
def map_fun(doc):
if doc.get('is_active', False):
yield doc['_id'], doc
class GetTasks(CouchView):
view_name = 'get_tasks'
@staticmethod
def map_fun(doc):
if not doc['_id'].startswith('_design'):
yield doc['_id'], doc
def sync_views(db):
GetActiveTask().sync(db)
GetTasks().sync(db)
| {
"content_hash": "f217f0dc046776f7560d0f9d8306e5d5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 60,
"avg_line_length": 21.711111111111112,
"alnum_prop": 0.5496417604912999,
"repo_name": "kevinjqiu/ctx",
"id": "5c267bbda56117ec7a654b9e3743e003db7e232d",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctx/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "3454"
},
{
"name": "Makefile",
"bytes": "1689"
},
{
"name": "Python",
"bytes": "31951"
}
],
"symlink_target": ""
} |
"""Utils to run ipmitool for data collection"""
from oslo_concurrency import processutils
from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as ipmiexcept
from ceilometer import utils
# The following two functions are copied from the ironic project to handle
# ipmitool's sensor data output. They need cleanup and sharing in the future.
# Check ironic/drivers/modules/ipmitool.py
def _get_sensor_type(sensor_data_dict):
    # There are only three sensor type name IDs: 'Sensor Type (Analog)',
    # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'.
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
    raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, "
                                     "unknown sensor type"))
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _translate_output(output):
"""Translate the return value into JSON dict
:param output: output of the execution of IPMI command(sensor reading)
"""
sensors_data_dict = {}
sensors_data_array = output.split('\n\n')
for sensor_data in sensors_data_array:
sensor_data_dict = _process_sensor(sensor_data)
if not sensor_data_dict:
continue
sensor_type = _get_sensor_type(sensor_data_dict)
# ignore the sensors which have no current 'Sensor Reading' data
sensor_id = sensor_data_dict['Sensor ID']
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(sensor_type,
{})[sensor_id] = sensor_data_dict
# get nothing, no valid sensor data
if not sensors_data_dict:
        raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, "
                                         "no data retrieved from given input"))
return sensors_data_dict
def _parse_output(output, template):
"""Parse the return value of IPMI command into dict
:param output: output of the execution of IPMI command
:param template: a dict that contains the expected items of
IPMI command and its length.
"""
ret = {}
index = 0
if not (output and template):
return ret
if "translate" in template:
ret = _translate_output(output)
else:
output_list = output.strip().split(' ')
if sum(template.values()) != len(output_list):
raise ipmiexcept.IPMIException(_("ipmitool output "
"length mismatch"))
for item in template.items():
index_end = index + item[1]
update_value = output_list[index: index_end]
ret[item[0]] = update_value
index = index_end
return ret
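# A minimal illustration of how the ``template`` dict drives _parse_output:
# each key names a field and its value says how many space-separated tokens
# belong to that field. The field names below are hypothetical.
def _example_parse_output():
    raw = "01 02 03 aa bb"
    template = {'header': 3, 'payload': 2}
    # Returns {'header': ['01', '02', '03'], 'payload': ['aa', 'bb']}
    return _parse_output(raw, template)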
def execute_ipmi_cmd(template=None):
    """Decorator for executing an IPMI command.
    It parses the output of the IPMI command into a dictionary.
    """
    template = template or {}
def _execute_ipmi_cmd(f):
def _execute(self, **kwargs):
args = ['ipmitool']
command = f(self, **kwargs)
args.extend(command.split(" "))
try:
(out, __) = utils.execute(*args, run_as_root=True)
except processutils.ProcessExecutionError:
raise ipmiexcept.IPMIException(_("running ipmitool failure"))
return _parse_output(out, template)
return _execute
return _execute_ipmi_cmd
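# Sketch of typical decorator usage, assuming a client class whose methods
# only build the ipmitool argument string; the command and template here are
# hypothetical, not taken from the real sensor code.
class _ExampleIPMIClient(object):
    @execute_ipmi_cmd(template={'device_id': 1, 'revision': 1})
    def get_device_id(self):
        # The decorator runs ``ipmitool mc info`` and parses its output
        # according to the template above.
        return "mc info"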
| {
"content_hash": "575e9fa8bcf4d084d5cfb14c72bd636d",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 33.00847457627118,
"alnum_prop": 0.6030808729139923,
"repo_name": "Juniper/ceilometer",
"id": "3aefddd9764fa862a29d50f0c21cf921aab5e2c0",
"size": "4472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/ipmi/platform/ipmitool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6027"
},
{
"name": "Python",
"bytes": "2857750"
},
{
"name": "Shell",
"bytes": "4136"
}
],
"symlink_target": ""
} |
import os
from twilio.rest import Client
# Initialize the client
# To set up environment variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
credential = client.chat \
.credentials("CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.update(friendly_name="MyCredential", api_key=os.environ['TWILIO_API_KEY_SECRET'])
print(credential.friendly_name)
| {
"content_hash": "3d433f75325aeb34af1b1cff04cf975a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 32.07142857142857,
"alnum_prop": 0.7594654788418709,
"repo_name": "TwilioDevEd/api-snippets",
"id": "ca76dac563ae8b40b6f8a7405d5d8f3fb63e89b8",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ip-messaging/rest/credentials/update-credentials/update-credentials.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
import datetime
from osmaxx.user_messaging.interaction import add_user_message
from django.conf import settings
from django.contrib import messages
from django.core import mail
from django.core.cache import cache
from django.utils.translation import gettext as _
class Emissary:
def __init__(self, recipient):
self.recipient = recipient
def info(self, message):
self.inform(messages.INFO, message)
def success(self, message):
self.inform(messages.SUCCESS, message)
def warn(self, message):
self.inform(messages.WARNING, message)
def error(self, message):
self.inform(messages.ERROR, message)
def debug(self, message):
self.inform(messages.DEBUG, message)
def inform_mail(self, subject, mail_body, warn_if_no_email=True):
try:
email_address = self.recipient.email
mail.send_mail(
settings.EMAIL_SUBJECT_PREFIX + subject,
mail_body,
settings.DEFAULT_FROM_EMAIL,
[email_address],
)
except AttributeError:
if warn_if_no_email:
self.warn(
_(
"There is no email address assigned to your account. You won't be notified by email!"
)
)
def inform(self, message_type, message):
add_user_message(
msg=message,
level=message_type,
user=self.recipient,
)
def get_cached_or_set(
cache_string,
func,
*args,
timeout=datetime.timedelta(minutes=15).total_seconds(),
on_cache_hit=None,
**kwargs
):
"""Gets requested value from cache, else produces it with specified function and caches it.
Gets the value at key ``cache_string`` from the cache. If it can't be found in the Django cache,
calls ``func(*args, **kwargs)`` to obtain a new value, which is returned and stored in the cache
at key ``cache_string``.
Args:
cache_string: Key for looking up the cached value and for storing newly computed values in case of a cache miss
func: Called with ``*args`` and ``**kwargs`` in case of a cache miss to provide the value
*args: Passed to ``func`` or ``on_cache_hit``
timeout: How long (in seconds) to cache a newly obtained value. Defaults to 15 minutes.
on_cache_hit: Called with the cached_value, ``*args`` and ``**kwargs`` if there was a cache hit
**kwargs: Passed to ``func`` or ``on_cache_hit``
Returns:
The cached value or in case of a cache miss or the newly obtained (and now cached) value.
"""
cached_value = cache.get(cache_string)
if cached_value is None:
cached_value = func(*args, **kwargs)
cache.set(cache_string, cached_value, timeout=timeout)
elif on_cache_hit:
on_cache_hit(cached_value, *args, **kwargs)
return cached_value
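# A minimal usage sketch for get_cached_or_set, assuming Django's cache is
# configured; ``_expensive_lookup`` is a hypothetical stand-in for a slow
# query or remote call.
def _example_get_cached_or_set(pk):
    def _expensive_lookup(key):
        return {'pk': key}  # stand-in for the real computation
    # The first call computes and caches the value for 60 seconds; later
    # calls within that window are served straight from the cache.
    return get_cached_or_set(f'example:{pk}', _expensive_lookup, pk, timeout=60)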
| {
"content_hash": "08136a0713ccab2789ce648ef99dbbd8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 119,
"avg_line_length": 34.16279069767442,
"alnum_prop": 0.6221919673247107,
"repo_name": "geometalab/osmaxx",
"id": "c55194228621227862cdfefbc9335550d337ce6a",
"size": "2938",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osmaxx/utils/shortcuts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21768"
},
{
"name": "Dockerfile",
"bytes": "7740"
},
{
"name": "HTML",
"bytes": "280740"
},
{
"name": "JavaScript",
"bytes": "264630"
},
{
"name": "Jinja",
"bytes": "6869531"
},
{
"name": "Lua",
"bytes": "5473"
},
{
"name": "Makefile",
"bytes": "4873"
},
{
"name": "NSIS",
"bytes": "5370"
},
{
"name": "Python",
"bytes": "544979"
},
{
"name": "Roff",
"bytes": "1233"
},
{
"name": "Shell",
"bytes": "9501"
}
],
"symlink_target": ""
} |
from kfp import compiler
from kfp import dsl
from kfp.dsl import component
@component
def flip_coin_op() -> str:
"""Flip a coin and output heads or tails randomly."""
import random
result = 'heads' if random.randint(0, 1) == 0 else 'tails'
return result
@component
def print_op(msg: str):
"""Print a message."""
print(msg)
@dsl.pipeline(name='nested-conditions-pipeline')
def my_pipeline():
flip1 = flip_coin_op()
print_op(msg=flip1.output)
flip2 = flip_coin_op()
print_op(msg=flip2.output)
with dsl.Condition(flip1.output != 'no-such-result'): # always true
flip3 = flip_coin_op()
print_op(msg=flip3.output)
with dsl.Condition(flip2.output == flip3.output):
flip4 = flip_coin_op()
print_op(msg=flip4.output)
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.yaml'))
| {
"content_hash": "988f76138601d7e3a7afd4a0a9b00727",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6226611226611226,
"repo_name": "kubeflow/pipelines",
"id": "febd3b6744d9910cc22b2e5adc93f6a8b66f9bca",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/test_data/pipelines/pipeline_with_nested_conditions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
"""Helpers for config validation using voluptuous."""
from __future__ import annotations
from collections.abc import Callable, Hashable
import contextlib
from datetime import (
date as date_sys,
datetime as datetime_sys,
time as time_sys,
timedelta,
)
from enum import Enum
import inspect
import logging
from numbers import Number
import os
import re
from socket import ( # type: ignore[attr-defined] # private, not in typeshed
_GLOBAL_DEFAULT_TIMEOUT,
)
from typing import Any, TypeVar, cast, overload
from urllib.parse import urlparse
from uuid import UUID
import voluptuous as vol
import voluptuous_serialize
from homeassistant.const import (
ATTR_AREA_ID,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
CONF_ABOVE,
CONF_ALIAS,
CONF_ATTRIBUTE,
CONF_BELOW,
CONF_CHOOSE,
CONF_CONDITION,
CONF_CONDITIONS,
CONF_CONTINUE_ON_ERROR,
CONF_CONTINUE_ON_TIMEOUT,
CONF_COUNT,
CONF_DEFAULT,
CONF_DELAY,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ELSE,
CONF_ENABLED,
CONF_ENTITY_ID,
CONF_ENTITY_NAMESPACE,
CONF_ERROR,
CONF_EVENT,
CONF_EVENT_DATA,
CONF_EVENT_DATA_TEMPLATE,
CONF_FOR,
CONF_FOR_EACH,
CONF_ID,
CONF_IF,
CONF_MATCH,
CONF_PARALLEL,
CONF_PLATFORM,
CONF_REPEAT,
CONF_SCAN_INTERVAL,
CONF_SCENE,
CONF_SEQUENCE,
CONF_SERVICE,
CONF_SERVICE_DATA,
CONF_SERVICE_DATA_TEMPLATE,
CONF_SERVICE_TEMPLATE,
CONF_STATE,
CONF_STOP,
CONF_TARGET,
CONF_THEN,
CONF_TIMEOUT,
CONF_UNTIL,
CONF_VALUE_TEMPLATE,
CONF_VARIABLES,
CONF_WAIT_FOR_TRIGGER,
CONF_WAIT_TEMPLATE,
CONF_WHILE,
ENTITY_MATCH_ALL,
ENTITY_MATCH_ANY,
ENTITY_MATCH_NONE,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
WEEKDAYS,
)
from homeassistant.core import split_entity_id, valid_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.util import raise_if_invalid_path, slugify as util_slugify
import homeassistant.util.dt as dt_util
from . import script_variables as script_variables_helper, template as template_helper
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM', 'HH:MM:SS' or 'HH:MM:SS.F'"
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
positive_float = vol.All(vol.Coerce(float), vol.Range(min=0))
latitude = vol.All(
vol.Coerce(float), vol.Range(min=-90, max=90), msg="invalid latitude"
)
longitude = vol.All(
vol.Coerce(float), vol.Range(min=-180, max=180), msg="invalid longitude"
)
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
_T = TypeVar("_T")
def path(value: Any) -> str:
"""Validate it's a safe path."""
if not isinstance(value, str):
raise vol.Invalid("Expected a string")
try:
raise_if_invalid_path(value)
except ValueError as err:
raise vol.Invalid("Invalid path") from err
return value
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: Any) -> Callable[[dict], dict]:
"""Validate that at least one key exists."""
def validate(obj: dict) -> dict:
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid("expected dictionary")
for k in obj:
if k in keys:
return obj
expected = ", ".join(str(k) for k in keys)
raise vol.Invalid(f"must contain at least one of {expected}.")
return validate
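# A small example of composing has_at_least_one_key with a schema: the
# validator below accepts {"host": ...} and/or {"port": ...} but rejects an
# empty dict. The option names are illustrative.
_EXAMPLE_HOST_OR_PORT = vol.All(
    vol.Schema({vol.Optional("host"): str, vol.Optional("port"): int}),
    has_at_least_one_key("host", "port"),
)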
def has_at_most_one_key(*keys: Any) -> Callable[[dict], dict]:
"""Validate that zero keys exist or one key exists."""
def validate(obj: dict) -> dict:
"""Test zero keys exist or one key exists in dict."""
if not isinstance(obj, dict):
raise vol.Invalid("expected dictionary")
if len(set(keys) & set(obj)) > 1:
expected = ", ".join(str(k) for k in keys)
raise vol.Invalid(f"must contain at most one of {expected}.")
return obj
return validate
def boolean(value: Any) -> bool:
"""Validate and coerce a boolean value."""
if isinstance(value, bool):
return value
if isinstance(value, str):
value = value.lower().strip()
if value in ("1", "true", "yes", "on", "enable"):
return True
if value in ("0", "false", "no", "off", "disable"):
return False
elif isinstance(value, Number):
# type ignore: https://github.com/python/mypy/issues/3186
return value != 0 # type: ignore[comparison-overlap]
raise vol.Invalid(f"invalid boolean value {value}")
_WS = re.compile("\\s*")
def whitespace(value: Any) -> str:
"""Validate result contains only whitespace."""
if isinstance(value, str) and _WS.fullmatch(value):
return value
raise vol.Invalid(f"contains non-whitespace: {value}")
def isdevice(value: Any) -> str:
"""Validate that value is a real device."""
try:
os.stat(value)
return str(value)
except OSError as err:
raise vol.Invalid(f"No device at {value} found") from err
def matches_regex(regex: str) -> Callable[[Any], str]:
"""Validate that the value is a string that matches a regex."""
compiled = re.compile(regex)
def validator(value: Any) -> str:
"""Validate that value matches the given regex."""
if not isinstance(value, str):
raise vol.Invalid(f"not a string value: {value}")
if not compiled.match(value):
raise vol.Invalid(
f"value {value} does not match regular expression {compiled.pattern}"
)
return value
return validator
def is_regex(value: Any) -> re.Pattern[Any]:
"""Validate that a string is a valid regular expression."""
try:
r = re.compile(value)
return r
except TypeError as err:
raise vol.Invalid(
f"value {value} is of the wrong type for a regular expression"
) from err
except re.error as err:
raise vol.Invalid(f"value {value} is not a valid regular expression") from err
def isfile(value: Any) -> str:
"""Validate that the value is an existing file."""
if value is None:
raise vol.Invalid("None is not file")
file_in = os.path.expanduser(str(value))
if not os.path.isfile(file_in):
raise vol.Invalid("not a file")
if not os.access(file_in, os.R_OK):
raise vol.Invalid("file not readable")
return file_in
def isdir(value: Any) -> str:
"""Validate that the value is an existing dir."""
if value is None:
raise vol.Invalid("not a directory")
dir_in = os.path.expanduser(str(value))
if not os.path.isdir(dir_in):
raise vol.Invalid("not a directory")
if not os.access(dir_in, os.R_OK):
raise vol.Invalid("directory not readable")
return dir_in
@overload
def ensure_list(value: None) -> list[Any]:
...
@overload
def ensure_list(value: list[_T]) -> list[_T]:
...
@overload
def ensure_list(value: list[_T] | _T) -> list[_T]:
...
def ensure_list(value: _T | None) -> list[_T] | list[Any]:
"""Wrap value in list if it is not one."""
if value is None:
return []
return cast("list[_T]", value) if isinstance(value, list) else [value]
def entity_id(value: Any) -> str:
"""Validate Entity ID."""
str_value = string(value).lower()
if valid_entity_id(str_value):
return str_value
raise vol.Invalid(f"Entity ID {value} is an invalid entity ID")
def entity_id_or_uuid(value: Any) -> str:
"""Validate Entity specified by entity_id or uuid."""
with contextlib.suppress(vol.Invalid):
return entity_id(value)
with contextlib.suppress(vol.Invalid):
return fake_uuid4_hex(value)
raise vol.Invalid(f"Entity {value} is neither a valid entity ID nor a valid UUID")
def _entity_ids(value: str | list, allow_uuid: bool) -> list[str]:
"""Help validate entity IDs or UUIDs."""
if value is None:
raise vol.Invalid("Entity IDs can not be None")
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(",")]
validator = entity_id_or_uuid if allow_uuid else entity_id
return [validator(ent_id) for ent_id in value]
def entity_ids(value: str | list) -> list[str]:
"""Validate Entity IDs."""
return _entity_ids(value, False)
def entity_ids_or_uuids(value: str | list) -> list[str]:
"""Validate entities specified by entity IDs or UUIDs."""
return _entity_ids(value, True)
comp_entity_ids = vol.Any(
vol.All(vol.Lower, vol.Any(ENTITY_MATCH_ALL, ENTITY_MATCH_NONE)), entity_ids
)
comp_entity_ids_or_uuids = vol.Any(
vol.All(vol.Lower, vol.Any(ENTITY_MATCH_ALL, ENTITY_MATCH_NONE)),
entity_ids_or_uuids,
)
def entity_domain(domain: str | list[str]) -> Callable[[Any], str]:
"""Validate that entity belong to domain."""
ent_domain = entities_domain(domain)
def validate(value: str) -> str:
"""Test if entity domain is domain."""
validated = ent_domain(value)
if len(validated) != 1:
raise vol.Invalid(f"Expected exactly 1 entity, got {len(validated)}")
return validated[0]
return validate
def entities_domain(domain: str | list[str]) -> Callable[[str | list], list[str]]:
"""Validate that entities belong to domain."""
if isinstance(domain, str):
def check_invalid(val: str) -> bool:
return val != domain
else:
def check_invalid(val: str) -> bool:
return val not in domain
def validate(values: str | list) -> list[str]:
"""Test if entity domain is domain."""
values = entity_ids(values)
for ent_id in values:
if check_invalid(split_entity_id(ent_id)[0]):
raise vol.Invalid(
f"Entity ID '{ent_id}' does not belong to domain '{domain}'"
)
return values
return validate
def enum(enumClass: type[Enum]) -> vol.All:
"""Create validator for specified enum."""
return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value: Any) -> str:
"""Validate icon."""
str_value = str(value)
if ":" in str_value:
return str_value
raise vol.Invalid('Icons should be specified in the form "prefix:name"')
time_period_dict = vol.All(
dict,
vol.Schema(
{
"days": vol.Coerce(float),
"hours": vol.Coerce(float),
"minutes": vol.Coerce(float),
"seconds": vol.Coerce(float),
"milliseconds": vol.Coerce(float),
}
),
has_at_least_one_key("days", "hours", "minutes", "seconds", "milliseconds"),
lambda value: timedelta(**value),
)
def time(value: Any) -> time_sys:
"""Validate and transform a time."""
if isinstance(value, time_sys):
return value
try:
time_val = dt_util.parse_time(value)
except TypeError as err:
raise vol.Invalid("Not a parseable type") from err
if time_val is None:
raise vol.Invalid(f"Invalid time specified: {value}")
return time_val
def date(value: Any) -> date_sys:
"""Validate and transform a date."""
if isinstance(value, date_sys):
return value
try:
date_val = dt_util.parse_date(value)
except TypeError as err:
raise vol.Invalid("Not a parseable type") from err
if date_val is None:
raise vol.Invalid("Could not parse date")
return date_val
def time_period_str(value: str) -> timedelta:
"""Validate and transform time offset."""
if isinstance(value, int): # type: ignore[unreachable]
raise vol.Invalid("Make sure you wrap time values in quotes")
if not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith("-"):
negative_offset = True
value = value[1:]
elif value.startswith("+"):
value = value[1:]
parsed = value.split(":")
if len(parsed) not in (2, 3):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
try:
hour = int(parsed[0])
minute = int(parsed[1])
try:
second = float(parsed[2])
except IndexError:
second = 0
except ValueError as err:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value)) from err
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
def time_period_seconds(value: float | str) -> timedelta:
"""Validate and transform seconds to a time offset."""
try:
return timedelta(seconds=float(value))
except (ValueError, TypeError) as err:
raise vol.Invalid(f"Expected seconds, got {value}") from err
time_period = vol.Any(time_period_str, time_period_seconds, timedelta, time_period_dict)
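# A few worked examples of what time_period accepts (illustrative only):
def _example_time_period():
    assert time_period("00:05") == timedelta(minutes=5)  # HH:MM string
    assert time_period(300) == timedelta(minutes=5)  # plain seconds
    assert time_period({"minutes": 5}) == timedelta(minutes=5)  # dict form
    assert time_period("-00:05") == timedelta(minutes=-5)  # negative offset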
def match_all(value: _T) -> _T:
"""Validate that matches all values."""
return value
def positive_timedelta(value: timedelta) -> timedelta:
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid("Time period should be positive")
return value
positive_time_period_dict = vol.All(time_period_dict, positive_timedelta)
positive_time_period = vol.All(time_period, positive_timedelta)
def remove_falsy(value: list[_T]) -> list[_T]:
"""Remove falsy values from a list."""
return [v for v in value if v]
def service(value: Any) -> str:
"""Validate service."""
# Services use same format as entities so we can use same helper.
str_value = string(value).lower()
if valid_entity_id(str_value):
return str_value
raise vol.Invalid(f"Service {value} does not match format <domain>.<name>")
def slug(value: Any) -> str:
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid("Slug should not be None")
str_value = str(value)
slg = util_slugify(str_value)
if str_value == slg:
return str_value
raise vol.Invalid(f"invalid slug {value} (try {slg})")
def schema_with_slug_keys(
value_schema: _T | Callable, *, slug_validator: Callable[[Any], str] = slug
) -> Callable:
"""Ensure dicts have slugs as keys.
Replacement of vol.Schema({cv.slug: value_schema}) to prevent misleading
"Extra keys" errors from voluptuous.
"""
schema = vol.Schema({str: value_schema})
def verify(value: dict) -> dict:
"""Validate all keys are slugs and then the value_schema."""
if not isinstance(value, dict):
raise vol.Invalid("expected dictionary")
for key in value.keys():
slug_validator(key)
return cast(dict, schema(value))
return verify
def slugify(value: Any) -> str:
"""Coerce a value to a slug."""
if value is None:
raise vol.Invalid("Slug should not be None")
slg = util_slugify(str(value))
if slg:
return slg
raise vol.Invalid(f"Unable to slugify {value}")
def string(value: Any) -> str:
"""Coerce value to string, except for None."""
if value is None:
raise vol.Invalid("string value is None")
if isinstance(value, template_helper.ResultWrapper):
value = value.render_result
elif isinstance(value, (list, dict)):
raise vol.Invalid("value should be a string")
return str(value)
def string_with_no_html(value: Any) -> str:
"""Validate that the value is a string without HTML."""
value = string(value)
regex = re.compile(r"<[a-z][\s\S]*>")
if regex.search(value):
raise vol.Invalid("the string should not contain HTML")
return str(value)
def temperature_unit(value: Any) -> str:
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == "C":
return TEMP_CELSIUS
if value == "F":
return TEMP_FAHRENHEIT
raise vol.Invalid("invalid temperature unit (expected C or F)")
def template(value: Any | None) -> template_helper.Template:
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid("template value is None")
if isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid("template value should be a string")
template_value = template_helper.Template(str(value)) # type: ignore[no-untyped-call]
try:
template_value.ensure_valid()
return template_value
except TemplateError as ex:
raise vol.Invalid(f"invalid template ({ex})") from ex
def dynamic_template(value: Any | None) -> template_helper.Template:
"""Validate a dynamic (non static) jinja2 template."""
if value is None:
raise vol.Invalid("template value is None")
if isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid("template value should be a string")
if not template_helper.is_template_string(str(value)):
raise vol.Invalid("template value does not contain a dynamic template")
template_value = template_helper.Template(str(value)) # type: ignore[no-untyped-call]
try:
template_value.ensure_valid()
return template_value
except TemplateError as ex:
raise vol.Invalid(f"invalid template ({ex})") from ex
def template_complex(value: Any) -> Any:
"""Validate a complex jinja2 template."""
if isinstance(value, list):
return_list = value.copy()
for idx, element in enumerate(return_list):
return_list[idx] = template_complex(element)
return return_list
if isinstance(value, dict):
return {
template_complex(key): template_complex(element)
for key, element in value.items()
}
if isinstance(value, str) and template_helper.is_template_string(value):
return template(value)
return value
positive_time_period_template = vol.Any(
positive_time_period, template, template_complex
)
def datetime(value: Any) -> datetime_sys:
"""Validate datetime."""
if isinstance(value, datetime_sys):
return value
try:
date_val = dt_util.parse_datetime(value)
except TypeError:
date_val = None
if date_val is None:
raise vol.Invalid(f"Invalid datetime specified: {value}")
return date_val
def time_zone(value: str) -> str:
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
"Invalid time zone passed in. Valid options can be found here: "
"http://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
)
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value: Any | None) -> object:
"""Validate timeout float > 0.0.
    None is coerced to the socket._GLOBAL_DEFAULT_TIMEOUT sentinel object.
"""
if value is None:
return _GLOBAL_DEFAULT_TIMEOUT
try:
float_value = float(value)
if float_value > 0.0:
return float_value
raise vol.Invalid("Invalid socket timeout value. float > 0.0 required.")
except Exception as err:
raise vol.Invalid(f"Invalid socket timeout: {err}")
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
"""Validate an URL."""
url_in = str(value)
if urlparse(url_in).scheme in ["http", "https"]:
return cast(str, vol.Schema(vol.Url())(url_in))
raise vol.Invalid("invalid url")
def url_no_path(value: Any) -> str:
"""Validate a url without a path."""
url_in = url(value)
if urlparse(url_in).path not in ("", "/"):
raise vol.Invalid("url it not allowed to have a path component")
return url_in
def x10_address(value: str) -> str:
"""Validate an x10 address."""
regex = re.compile(r"([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$")
if not regex.match(value):
raise vol.Invalid("Invalid X10 Address")
return str(value).lower()
def uuid4_hex(value: Any) -> str:
"""Validate a v4 UUID in hex format."""
try:
result = UUID(value, version=4)
except (ValueError, AttributeError, TypeError) as error:
raise vol.Invalid("Invalid Version4 UUID", error_message=str(error))
if result.hex != value.lower():
# UUID() will create a uuid4 if input is invalid
raise vol.Invalid("Invalid Version4 UUID")
return result.hex
_FAKE_UUID_4_HEX = re.compile(r"^[0-9a-f]{32}$")
def fake_uuid4_hex(value: Any) -> str:
"""Validate a fake v4 UUID generated by random_uuid_hex."""
try:
if not _FAKE_UUID_4_HEX.match(value):
raise vol.Invalid("Invalid UUID")
except TypeError as exc:
raise vol.Invalid("Invalid UUID") from exc
return cast(str, value) # Pattern.match throws if input is not a string
def ensure_list_csv(value: Any) -> list:
"""Ensure that input is a list or make one from comma-separated string."""
if isinstance(value, str):
return [member.strip() for member in value.split(",")]
return ensure_list(value)
class multi_select:
"""Multi select validator returning list of selected values."""
def __init__(self, options: dict | list) -> None:
"""Initialize multi select."""
self.options = options
def __call__(self, selected: list) -> list:
"""Validate input."""
if not isinstance(selected, list):
raise vol.Invalid("Not a list")
for value in selected:
if value not in self.options:
raise vol.Invalid(f"{value} is not a valid option")
return selected
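# Usage sketch: multi_select validates that every selected value is a known
# option. The options below are hypothetical.
_EXAMPLE_DAY_SELECT = multi_select({"mon": "Monday", "tue": "Tuesday"})
# _EXAMPLE_DAY_SELECT(["mon"]) returns ["mon"], while
# _EXAMPLE_DAY_SELECT(["fri"]) raises vol.Invalid.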
def _deprecated_or_removed(
key: str,
replacement_key: str | None,
default: Any | None,
raise_if_present: bool,
option_removed: bool,
) -> Callable[[dict], dict]:
"""
Log key as deprecated and provide a replacement (if exists) or fail.
Expected behavior:
- Outputs or throws the appropriate deprecation warning if key is detected
- Outputs or throws the appropriate error if key is detected and removed from support
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
"""
module = inspect.getmodule(inspect.stack(context=0)[2].frame)
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/core/issues/24982
module_name = __name__
if option_removed:
logger_func = logging.getLogger(module_name).error
option_status = "has been removed"
else:
logger_func = logging.getLogger(module_name).warning
option_status = "is deprecated"
def validator(config: dict) -> dict:
"""Check if key is in config and log warning or error."""
if key in config:
try:
near = f"near {config.__config_file__}:{config.__line__} " # type: ignore[attr-defined]
except AttributeError:
near = ""
arguments: tuple[str, ...]
if replacement_key:
warning = "The '%s' option %s%s, please replace it with '%s'"
arguments = (key, near, option_status, replacement_key)
else:
warning = (
"The '%s' option %s%s, please remove it from your configuration"
)
arguments = (key, near, option_status)
if raise_if_present:
raise vol.Invalid(warning % arguments)
logger_func(warning, *arguments)
value = config[key]
if replacement_key:
config.pop(key)
else:
value = default
keys = [key]
if replacement_key:
keys.append(replacement_key)
if value is not None and (
replacement_key not in config or default == config.get(replacement_key)
):
config[replacement_key] = value
return has_at_most_one_key(*keys)(config)
return validator
def deprecated(
key: str,
replacement_key: str | None = None,
default: Any | None = None,
raise_if_present: bool | None = False,
) -> Callable[[dict], dict]:
"""
Log key as deprecated and provide a replacement (if exists).
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected or raises an exception
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
"""
return _deprecated_or_removed(
key,
replacement_key=replacement_key,
default=default,
raise_if_present=raise_if_present or False,
option_removed=False,
)
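# Usage sketch for deprecated(): wrapping a schema so the old key is logged
# as deprecated and its value is moved to the replacement key. The option
# names here are made up for illustration.
_EXAMPLE_DEPRECATED_SCHEMA = vol.All(
    deprecated("old_interval", replacement_key="scan_interval"),
    vol.Schema({vol.Optional("scan_interval"): time_period}),
)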
def removed(
key: str,
default: Any | None = None,
raise_if_present: bool | None = True,
) -> Callable[[dict], dict]:
"""
Log key as deprecated and fail the config validation.
Expected behavior:
- Outputs the appropriate error if key is detected and removed from support or raises an exception
"""
return _deprecated_or_removed(
key,
replacement_key=None,
default=default,
raise_if_present=raise_if_present or False,
option_removed=True,
)
def key_value_schemas(
key: str,
value_schemas: dict[Hashable, vol.Schema],
default_schema: vol.Schema | None = None,
default_description: str | None = None,
) -> Callable[[Any], dict[Hashable, Any]]:
"""Create a validator that validates based on a value for specific key.
This gives better error messages.
"""
def key_value_validator(value: Any) -> dict[Hashable, Any]:
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
key_value = value.get(key)
if isinstance(key_value, Hashable) and key_value in value_schemas:
return cast(dict[Hashable, Any], value_schemas[key_value](value))
if default_schema:
with contextlib.suppress(vol.Invalid):
return cast(dict[Hashable, Any], default_schema(value))
alternatives = ", ".join(str(key) for key in value_schemas)
if default_description:
alternatives += ", " + default_description
raise vol.Invalid(
f"Unexpected value for {key}: '{key_value}'. Expected {alternatives}"
)
return key_value_validator
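# Usage sketch for key_value_schemas: the schema applied to the whole dict
# is chosen by the value of a single key ("mode" here is hypothetical).
_EXAMPLE_MODE_SCHEMA = key_value_schemas(
    "mode",
    {
        "single": vol.Schema({vol.Required("mode"): "single"}),
        "queued": vol.Schema(
            {vol.Required("mode"): "queued", vol.Required("max"): positive_int}
        ),
    },
)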
# Validator helpers
def key_dependency(
key: Hashable, dependency: Hashable
) -> Callable[[dict[Hashable, Any]], dict[Hashable, Any]]:
"""Validate that all dependencies exist for key."""
def validator(value: dict[Hashable, Any]) -> dict[Hashable, Any]:
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid("key dependencies require a dict")
if key in value and dependency not in value:
raise vol.Invalid(
f'dependency violation - key "{key}" requires '
f'key "{dependency}" to exist'
)
return value
return validator
def custom_serializer(schema: Any) -> Any:
"""Serialize additional types for voluptuous_serialize."""
from . import selector # pylint: disable=import-outside-toplevel
if schema is positive_time_period_dict:
return {"type": "positive_time_period_dict"}
if schema is string:
return {"type": "string"}
if schema is boolean:
return {"type": "boolean"}
if isinstance(schema, multi_select):
return {"type": "multi_select", "options": schema.options}
if isinstance(schema, selector.Selector):
return schema.serialize()
return voluptuous_serialize.UNSUPPORTED
def expand_condition_shorthand(value: Any | None) -> Any:
"""Expand boolean condition shorthand notations."""
if not isinstance(value, dict) or CONF_CONDITIONS in value:
return value
for key, schema in (
("and", AND_CONDITION_SHORTHAND_SCHEMA),
("or", OR_CONDITION_SHORTHAND_SCHEMA),
("not", NOT_CONDITION_SHORTHAND_SCHEMA),
):
try:
schema(value)
return {
CONF_CONDITION: key,
CONF_CONDITIONS: value[key],
**{k: value[k] for k in value if k != key},
}
except vol.MultipleInvalid:
pass
if isinstance(value.get(CONF_CONDITION), list):
try:
CONDITION_SHORTHAND_SCHEMA(value)
return {
CONF_CONDITION: "and",
CONF_CONDITIONS: value[CONF_CONDITION],
**{k: value[k] for k in value if k != CONF_CONDITION},
}
except vol.MultipleInvalid:
pass
return value
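# Illustrative example of the shorthand expansion above, where cond_a and
# cond_b stand for already valid condition dicts:
#
#     expand_condition_shorthand({"or": [cond_a, cond_b]})
#     # -> {"condition": "or", "conditions": [cond_a, cond_b]}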
# Schemas
PLATFORM_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): string,
vol.Optional(CONF_ENTITY_NAMESPACE): string,
vol.Optional(CONF_SCAN_INTERVAL): time_period,
}
)
PLATFORM_SCHEMA_BASE = PLATFORM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
ENTITY_SERVICE_FIELDS = {
# Either accept static entity IDs, a single dynamic template or a mixed list
# of static and dynamic templates. While this could be solved with a single
    # complex template, handling it like this keeps config validation useful.
vol.Optional(ATTR_ENTITY_ID): vol.Any(
comp_entity_ids, dynamic_template, vol.All(list, template_complex)
),
vol.Optional(ATTR_DEVICE_ID): vol.Any(
ENTITY_MATCH_NONE, vol.All(ensure_list, [vol.Any(dynamic_template, str)])
),
vol.Optional(ATTR_AREA_ID): vol.Any(
ENTITY_MATCH_NONE, vol.All(ensure_list, [vol.Any(dynamic_template, str)])
),
}
TARGET_SERVICE_FIELDS = {
# Same as ENTITY_SERVICE_FIELDS but supports specifying entity by entity registry
# ID.
# Either accept static entity IDs, a single dynamic template or a mixed list
# of static and dynamic templates. While this could be solved with a single
    # complex template, handling it like this keeps config validation useful.
vol.Optional(ATTR_ENTITY_ID): vol.Any(
comp_entity_ids_or_uuids, dynamic_template, vol.All(list, template_complex)
),
vol.Optional(ATTR_DEVICE_ID): vol.Any(
ENTITY_MATCH_NONE, vol.All(ensure_list, [vol.Any(dynamic_template, str)])
),
vol.Optional(ATTR_AREA_ID): vol.Any(
ENTITY_MATCH_NONE, vol.All(ensure_list, [vol.Any(dynamic_template, str)])
),
}
def make_entity_service_schema(
schema: dict, *, extra: int = vol.PREVENT_EXTRA
) -> vol.Schema:
"""Create an entity service schema."""
return vol.Schema(
vol.All(
vol.Schema(
{
# The frontend stores data here. Don't use in core.
vol.Remove("metadata"): dict,
**schema,
**ENTITY_SERVICE_FIELDS,
},
extra=extra,
),
has_at_least_one_key(*ENTITY_SERVICE_FIELDS),
)
)
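# Illustrative sketch: a service schema with one hypothetical extra field on
# top of the standard entity targeting fields.
#
#     TURN_ON_SCHEMA = make_entity_service_schema(
#         {vol.Optional("brightness"): positive_int}
#     )
#     TURN_ON_SCHEMA({"entity_id": "light.kitchen", "brightness": 120})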
SCRIPT_VARIABLES_SCHEMA = vol.All(
vol.Schema({str: template_complex}),
# pylint: disable=unnecessary-lambda
lambda val: script_variables_helper.ScriptVariables(val),
)
def script_action(value: Any) -> dict:
"""Validate a script action."""
if not isinstance(value, dict):
raise vol.Invalid("expected dictionary")
try:
action = determine_script_action(value)
except ValueError as err:
raise vol.Invalid(str(err))
return ACTION_TYPE_SCHEMAS[action](value)
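# Illustrative example: a delay step is typed by determine_script_action()
# and validated with the matching schema from ACTION_TYPE_SCHEMAS (both
# defined near the end of this module).
#
#     script_action({"delay": "00:00:05"})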
SCRIPT_SCHEMA = vol.All(ensure_list, [script_action])
SCRIPT_ACTION_BASE_SCHEMA = {
vol.Optional(CONF_ALIAS): string,
vol.Optional(CONF_CONTINUE_ON_ERROR): boolean,
vol.Optional(CONF_ENABLED): boolean,
}
EVENT_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_EVENT): string,
vol.Optional(CONF_EVENT_DATA): vol.All(dict, template_complex),
vol.Optional(CONF_EVENT_DATA_TEMPLATE): vol.All(dict, template_complex),
}
)
SERVICE_SCHEMA = vol.All(
vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Exclusive(CONF_SERVICE, "service name"): vol.Any(
service, dynamic_template
),
vol.Exclusive(CONF_SERVICE_TEMPLATE, "service name"): vol.Any(
service, dynamic_template
),
vol.Optional(CONF_SERVICE_DATA): vol.Any(
template, vol.All(dict, template_complex)
),
vol.Optional(CONF_SERVICE_DATA_TEMPLATE): vol.Any(
template, vol.All(dict, template_complex)
),
vol.Optional(CONF_ENTITY_ID): comp_entity_ids,
vol.Optional(CONF_TARGET): vol.Any(TARGET_SERVICE_FIELDS, dynamic_template),
# The frontend stores data here. Don't use in core.
vol.Remove("metadata"): dict,
}
),
has_at_least_one_key(CONF_SERVICE, CONF_SERVICE_TEMPLATE),
)
NUMERIC_STATE_THRESHOLD_SCHEMA = vol.Any(
vol.Coerce(float), vol.All(str, entity_domain(["input_number", "number", "sensor"]))
)
CONDITION_BASE_SCHEMA = {
vol.Optional(CONF_ALIAS): string,
vol.Optional(CONF_ENABLED): boolean,
}
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "numeric_state",
vol.Required(CONF_ENTITY_ID): entity_ids_or_uuids,
vol.Optional(CONF_ATTRIBUTE): str,
CONF_BELOW: NUMERIC_STATE_THRESHOLD_SCHEMA,
CONF_ABOVE: NUMERIC_STATE_THRESHOLD_SCHEMA,
vol.Optional(CONF_VALUE_TEMPLATE): template,
}
),
has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_CONDITION_BASE_SCHEMA = {
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "state",
vol.Required(CONF_ENTITY_ID): entity_ids_or_uuids,
vol.Optional(CONF_MATCH, default=ENTITY_MATCH_ALL): vol.All(
vol.Lower, vol.Any(ENTITY_MATCH_ALL, ENTITY_MATCH_ANY)
),
vol.Optional(CONF_ATTRIBUTE): str,
vol.Optional(CONF_FOR): positive_time_period,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional("from"): str,
}
STATE_CONDITION_STATE_SCHEMA = vol.Schema(
{
**STATE_CONDITION_BASE_SCHEMA,
vol.Required(CONF_STATE): vol.Any(str, [str]),
}
)
STATE_CONDITION_ATTRIBUTE_SCHEMA = vol.Schema(
{
**STATE_CONDITION_BASE_SCHEMA,
vol.Required(CONF_STATE): match_all,
}
)
def STATE_CONDITION_SCHEMA(value: Any) -> dict: # pylint: disable=invalid-name
"""Validate a state condition."""
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
if CONF_ATTRIBUTE in value:
validated: dict = STATE_CONDITION_ATTRIBUTE_SCHEMA(value)
else:
validated = STATE_CONDITION_STATE_SCHEMA(value)
return key_dependency("for", "state")(validated)
SUN_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "sun",
vol.Optional("before"): sun_event,
vol.Optional("before_offset"): time_period,
vol.Optional("after"): vol.All(
vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)
),
vol.Optional("after_offset"): time_period,
}
),
has_at_least_one_key("before", "after"),
)
TEMPLATE_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "template",
vol.Required(CONF_VALUE_TEMPLATE): template,
}
)
TIME_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "time",
vol.Optional("before"): vol.Any(
time, vol.All(str, entity_domain(["input_datetime", "sensor"]))
),
vol.Optional("after"): vol.Any(
time, vol.All(str, entity_domain(["input_datetime", "sensor"]))
),
vol.Optional("weekday"): weekdays,
}
),
has_at_least_one_key("before", "after", "weekday"),
)
TRIGGER_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "trigger",
vol.Required(CONF_ID): vol.All(ensure_list, [string]),
}
)
ZONE_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "zone",
vol.Required(CONF_ENTITY_ID): entity_ids,
vol.Required("zone"): entity_ids,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional("event"): vol.Any("enter", "leave"),
}
)
AND_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "and",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
AND_CONDITION_SHORTHAND_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required("and"): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
OR_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "or",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
OR_CONDITION_SHORTHAND_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required("or"): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
NOT_CONDITION_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "not",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
NOT_CONDITION_SHORTHAND_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required("not"): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
DEVICE_CONDITION_BASE_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): "device",
vol.Required(CONF_DEVICE_ID): str,
vol.Required(CONF_DOMAIN): str,
vol.Remove("metadata"): dict,
}
)
DEVICE_CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
dynamic_template_condition_action = vol.All(
# Wrap a shorthand template condition in a template condition
dynamic_template,
lambda config: {
CONF_VALUE_TEMPLATE: config,
CONF_CONDITION: "template",
},
)
CONDITION_SHORTHAND_SCHEMA = vol.Schema(
{
**CONDITION_BASE_SCHEMA,
vol.Required(CONF_CONDITION): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
CONDITION_SCHEMA: vol.Schema = vol.Schema(
vol.Any(
vol.All(
expand_condition_shorthand,
key_value_schemas(
CONF_CONDITION,
{
"and": AND_CONDITION_SCHEMA,
"device": DEVICE_CONDITION_SCHEMA,
"not": NOT_CONDITION_SCHEMA,
"numeric_state": NUMERIC_STATE_CONDITION_SCHEMA,
"or": OR_CONDITION_SCHEMA,
"state": STATE_CONDITION_SCHEMA,
"sun": SUN_CONDITION_SCHEMA,
"template": TEMPLATE_CONDITION_SCHEMA,
"time": TIME_CONDITION_SCHEMA,
"trigger": TRIGGER_CONDITION_SCHEMA,
"zone": ZONE_CONDITION_SCHEMA,
},
),
),
dynamic_template_condition_action,
)
)
dynamic_template_condition_action = vol.All(
# Wrap a shorthand template condition action in a template condition
vol.Schema(
{**CONDITION_BASE_SCHEMA, vol.Required(CONF_CONDITION): dynamic_template}
),
lambda config: {
**config,
CONF_VALUE_TEMPLATE: config[CONF_CONDITION],
CONF_CONDITION: "template",
},
)
CONDITION_ACTION_SCHEMA: vol.Schema = vol.Schema(
vol.All(
expand_condition_shorthand,
key_value_schemas(
CONF_CONDITION,
{
"and": AND_CONDITION_SCHEMA,
"device": DEVICE_CONDITION_SCHEMA,
"not": NOT_CONDITION_SCHEMA,
"numeric_state": NUMERIC_STATE_CONDITION_SCHEMA,
"or": OR_CONDITION_SCHEMA,
"state": STATE_CONDITION_SCHEMA,
"sun": SUN_CONDITION_SCHEMA,
"template": TEMPLATE_CONDITION_SCHEMA,
"time": TIME_CONDITION_SCHEMA,
"trigger": TRIGGER_CONDITION_SCHEMA,
"zone": ZONE_CONDITION_SCHEMA,
},
dynamic_template_condition_action,
"a list of conditions or a valid template",
),
)
)
TRIGGER_BASE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): str,
vol.Required(CONF_PLATFORM): str,
vol.Optional(CONF_ID): str,
vol.Optional(CONF_VARIABLES): SCRIPT_VARIABLES_SCHEMA,
vol.Optional(CONF_ENABLED): boolean,
}
)
_base_trigger_validator_schema = TRIGGER_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
# This is the first round of validation; we don't want to process the config
# here already, just ensure basics such as platform and ID are there.
def _base_trigger_validator(value: Any) -> Any:
_base_trigger_validator_schema(value)
return value
TRIGGER_SCHEMA = vol.All(ensure_list, [_base_trigger_validator])
_SCRIPT_DELAY_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_DELAY): positive_time_period_template,
}
)
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_WAIT_TEMPLATE): template,
vol.Optional(CONF_TIMEOUT): positive_time_period_template,
vol.Optional(CONF_CONTINUE_ON_TIMEOUT): boolean,
}
)
DEVICE_ACTION_BASE_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_DEVICE_ID): string,
vol.Required(CONF_DOMAIN): str,
vol.Remove("metadata"): dict,
}
)
DEVICE_ACTION_SCHEMA = DEVICE_ACTION_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
_SCRIPT_SCENE_SCHEMA = vol.Schema(
{**SCRIPT_ACTION_BASE_SCHEMA, vol.Required(CONF_SCENE): entity_domain("scene")}
)
_SCRIPT_REPEAT_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_REPEAT): vol.All(
{
vol.Exclusive(CONF_COUNT, "repeat"): vol.Any(vol.Coerce(int), template),
vol.Exclusive(CONF_FOR_EACH, "repeat"): vol.Any(
dynamic_template, vol.All(list, template_complex)
),
vol.Exclusive(CONF_WHILE, "repeat"): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Exclusive(CONF_UNTIL, "repeat"): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Required(CONF_SEQUENCE): SCRIPT_SCHEMA,
},
has_at_least_one_key(CONF_COUNT, CONF_FOR_EACH, CONF_WHILE, CONF_UNTIL),
),
}
)
_SCRIPT_CHOOSE_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_CHOOSE): vol.All(
ensure_list,
[
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Required(CONF_SEQUENCE): SCRIPT_SCHEMA,
}
],
),
vol.Optional(CONF_DEFAULT): SCRIPT_SCHEMA,
}
)
_SCRIPT_WAIT_FOR_TRIGGER_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_WAIT_FOR_TRIGGER): TRIGGER_SCHEMA,
vol.Optional(CONF_TIMEOUT): positive_time_period_template,
vol.Optional(CONF_CONTINUE_ON_TIMEOUT): boolean,
}
)
_SCRIPT_IF_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_IF): vol.All(ensure_list, [CONDITION_SCHEMA]),
vol.Required(CONF_THEN): SCRIPT_SCHEMA,
vol.Optional(CONF_ELSE): SCRIPT_SCHEMA,
}
)
_SCRIPT_SET_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_VARIABLES): SCRIPT_VARIABLES_SCHEMA,
}
)
_SCRIPT_STOP_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_STOP): vol.Any(None, string),
vol.Optional(CONF_ERROR, default=False): boolean,
}
)
_SCRIPT_PARALLEL_SEQUENCE = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_SEQUENCE): SCRIPT_SCHEMA,
}
)
_parallel_sequence_action = vol.All(
    # Wrap shorthand sequences in a parallel action
SCRIPT_SCHEMA,
lambda config: {
CONF_SEQUENCE: config,
},
)
_SCRIPT_PARALLEL_SCHEMA = vol.Schema(
{
**SCRIPT_ACTION_BASE_SCHEMA,
vol.Required(CONF_PARALLEL): vol.All(
ensure_list, [vol.Any(_SCRIPT_PARALLEL_SEQUENCE, _parallel_sequence_action)]
),
}
)
SCRIPT_ACTION_DELAY = "delay"
SCRIPT_ACTION_WAIT_TEMPLATE = "wait_template"
SCRIPT_ACTION_CHECK_CONDITION = "condition"
SCRIPT_ACTION_FIRE_EVENT = "event"
SCRIPT_ACTION_CALL_SERVICE = "call_service"
SCRIPT_ACTION_DEVICE_AUTOMATION = "device"
SCRIPT_ACTION_ACTIVATE_SCENE = "scene"
SCRIPT_ACTION_REPEAT = "repeat"
SCRIPT_ACTION_CHOOSE = "choose"
SCRIPT_ACTION_WAIT_FOR_TRIGGER = "wait_for_trigger"
SCRIPT_ACTION_VARIABLES = "variables"
SCRIPT_ACTION_STOP = "stop"
SCRIPT_ACTION_IF = "if"
SCRIPT_ACTION_PARALLEL = "parallel"
def determine_script_action(action: dict[str, Any]) -> str:
"""Determine action type."""
if CONF_DELAY in action:
return SCRIPT_ACTION_DELAY
if CONF_WAIT_TEMPLATE in action:
return SCRIPT_ACTION_WAIT_TEMPLATE
if any(key in action for key in (CONF_CONDITION, "and", "or", "not")):
return SCRIPT_ACTION_CHECK_CONDITION
if CONF_EVENT in action:
return SCRIPT_ACTION_FIRE_EVENT
if CONF_DEVICE_ID in action:
return SCRIPT_ACTION_DEVICE_AUTOMATION
if CONF_SCENE in action:
return SCRIPT_ACTION_ACTIVATE_SCENE
if CONF_REPEAT in action:
return SCRIPT_ACTION_REPEAT
if CONF_CHOOSE in action:
return SCRIPT_ACTION_CHOOSE
if CONF_WAIT_FOR_TRIGGER in action:
return SCRIPT_ACTION_WAIT_FOR_TRIGGER
if CONF_VARIABLES in action:
return SCRIPT_ACTION_VARIABLES
if CONF_IF in action:
return SCRIPT_ACTION_IF
if CONF_SERVICE in action or CONF_SERVICE_TEMPLATE in action:
return SCRIPT_ACTION_CALL_SERVICE
if CONF_STOP in action:
return SCRIPT_ACTION_STOP
if CONF_PARALLEL in action:
return SCRIPT_ACTION_PARALLEL
raise ValueError("Unable to determine action")
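# Illustrative mapping (no new behavior): the discriminating key decides the
# action type.
#
#     determine_script_action({"delay": "00:00:05"})  # -> SCRIPT_ACTION_DELAY
#     determine_script_action({"event": "my_event"})  # -> SCRIPT_ACTION_FIRE_EVENT
#     determine_script_action({"unknown_key": 1})     # raises ValueError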
ACTION_TYPE_SCHEMAS: dict[str, Callable[[Any], dict]] = {
SCRIPT_ACTION_CALL_SERVICE: SERVICE_SCHEMA,
SCRIPT_ACTION_DELAY: _SCRIPT_DELAY_SCHEMA,
SCRIPT_ACTION_WAIT_TEMPLATE: _SCRIPT_WAIT_TEMPLATE_SCHEMA,
SCRIPT_ACTION_FIRE_EVENT: EVENT_SCHEMA,
SCRIPT_ACTION_CHECK_CONDITION: CONDITION_ACTION_SCHEMA,
SCRIPT_ACTION_DEVICE_AUTOMATION: DEVICE_ACTION_SCHEMA,
SCRIPT_ACTION_ACTIVATE_SCENE: _SCRIPT_SCENE_SCHEMA,
SCRIPT_ACTION_REPEAT: _SCRIPT_REPEAT_SCHEMA,
SCRIPT_ACTION_CHOOSE: _SCRIPT_CHOOSE_SCHEMA,
SCRIPT_ACTION_WAIT_FOR_TRIGGER: _SCRIPT_WAIT_FOR_TRIGGER_SCHEMA,
SCRIPT_ACTION_VARIABLES: _SCRIPT_SET_SCHEMA,
SCRIPT_ACTION_STOP: _SCRIPT_STOP_SCHEMA,
SCRIPT_ACTION_IF: _SCRIPT_IF_SCHEMA,
SCRIPT_ACTION_PARALLEL: _SCRIPT_PARALLEL_SCHEMA,
}
# Validate currencies adopted by countries
currency = vol.In(
{
"AED",
"AFN",
"ALL",
"AMD",
"ANG",
"AOA",
"ARS",
"AUD",
"AWG",
"AZN",
"BAM",
"BBD",
"BDT",
"BGN",
"BHD",
"BIF",
"BMD",
"BND",
"BOB",
"BRL",
"BSD",
"BTN",
"BWP",
"BYN",
"BYR",
"BZD",
"CAD",
"CDF",
"CHF",
"CLP",
"CNY",
"COP",
"CRC",
"CUP",
"CVE",
"CZK",
"DJF",
"DKK",
"DOP",
"DZD",
"EGP",
"ERN",
"ETB",
"EUR",
"FJD",
"FKP",
"GBP",
"GEL",
"GHS",
"GIP",
"GMD",
"GNF",
"GTQ",
"GYD",
"HKD",
"HNL",
"HRK",
"HTG",
"HUF",
"IDR",
"ILS",
"INR",
"IQD",
"IRR",
"ISK",
"JMD",
"JOD",
"JPY",
"KES",
"KGS",
"KHR",
"KMF",
"KPW",
"KRW",
"KWD",
"KYD",
"KZT",
"LAK",
"LBP",
"LKR",
"LRD",
"LSL",
"LTL",
"LYD",
"MAD",
"MDL",
"MGA",
"MKD",
"MMK",
"MNT",
"MOP",
"MRO",
"MUR",
"MVR",
"MWK",
"MXN",
"MYR",
"MZN",
"NAD",
"NGN",
"NIO",
"NOK",
"NPR",
"NZD",
"OMR",
"PAB",
"PEN",
"PGK",
"PHP",
"PKR",
"PLN",
"PYG",
"QAR",
"RON",
"RSD",
"RUB",
"RWF",
"SAR",
"SBD",
"SCR",
"SDG",
"SEK",
"SGD",
"SHP",
"SLL",
"SOS",
"SRD",
"SSP",
"STD",
"SYP",
"SZL",
"THB",
"TJS",
"TMT",
"TND",
"TOP",
"TRY",
"TTD",
"TWD",
"TZS",
"UAH",
"UGX",
"USD",
"UYU",
"UZS",
"VEF",
"VND",
"VUV",
"WST",
"XAF",
"XCD",
"XOF",
"XPF",
"YER",
"ZAR",
"ZMK",
"ZMW",
"ZWL",
},
msg="invalid ISO 4217 formatted currency",
)
| {
"content_hash": "e91c8fe0baba3573ba33346371adf8dc",
"timestamp": "",
"source": "github",
"line_count": 1820,
"max_line_length": 106,
"avg_line_length": 28.42197802197802,
"alnum_prop": 0.5970654191153727,
"repo_name": "w1ll1am23/home-assistant",
"id": "35191d7704245eed70b2994392ef42492dd455c7",
"size": "51728",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/config_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.webpubsub import WebPubSubManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-webpubsub
# USAGE
python web_pub_sub_list_keys.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = WebPubSubManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.web_pub_sub.list_keys(
resource_group_name="myResourceGroup",
resource_name="myWebPubSubService",
)
print(response)
# x-ms-original-file: specification/webpubsub/resource-manager/Microsoft.SignalRService/preview/2022-08-01-preview/examples/WebPubSub_ListKeys.json
if __name__ == "__main__":
main()
| {
"content_hash": "c40a49daff09af86650d53a443d5efee",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 147,
"avg_line_length": 34.18181818181818,
"alnum_prop": 0.7375886524822695,
"repo_name": "Azure/azure-sdk-for-python",
"id": "05ad703ad8f9ff51f23c43f3993167bbf6cf4d19",
"size": "1596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/webpubsub/azure-mgmt-webpubsub/generated_samples/web_pub_sub_list_keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def cross_entropy2d(input, target, weight=None, size_average=True):
n, c, h, w = input.size()
    # specify the class dimension explicitly (the implicit dim is deprecated)
    log_p = F.log_softmax(input, dim=1)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
log_p = log_p.view(-1, c)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
if size_average:
loss /= mask.data.sum()
    return loss
| {
"content_hash": "f56a2428ee9519cfe6c2cfe0f1feefe1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 30.57894736842105,
"alnum_prop": 0.6247848537005164,
"repo_name": "ibadami/pytorch-semseg",
"id": "b1bd325392fc6fdc30ab870dfc2dd66ad198b1e0",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptsemseg/loss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62160"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Food',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True)),
('quantity', models.IntegerField()),
('unit', models.CharField(max_length=30)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('slug', models.SlugField(blank=True)),
('ingredients', models.ManyToManyField(to='food.Food')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| {
"content_hash": "dd56285bbb0dfb9a433b6a04065a0d0c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 114,
"avg_line_length": 31.94871794871795,
"alnum_prop": 0.5008025682182986,
"repo_name": "kevinharvey/fridge",
"id": "babf3d90ccb20104aaf30621c52c7bdd5be27c89",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "food/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7383"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from oslo_db import exception as oslo_db_exc
import sqlalchemy as sa
from sqlalchemy.orm import exc as db_exc
from congress.db import api as db
from congress.db import model_base
from congress.db import utils as db_utils
class LibraryPolicy(model_base.BASE, model_base.HasId):
__tablename__ = 'library_policies'
name = sa.Column(sa.String(255), nullable=False, unique=True)
abbreviation = sa.Column(sa.String(5), nullable=False)
description = sa.Column(sa.Text(), nullable=False)
kind = sa.Column(sa.Text(), nullable=False)
rules = sa.Column(sa.Text(), nullable=False)
def to_dict(self, include_rules=True, json_rules=False):
"""From a given library policy, return a policy dict.
        :param include_rules: (bool, optional) include policy rules in the
            returned dictionary. Defaults to True.
"""
if not include_rules:
d = {'id': self.id,
'name': self.name,
'abbreviation': self.abbreviation,
'description': self.description,
'kind': self.kind}
else:
d = {'id': self.id,
'name': self.name,
'abbreviation': self.abbreviation,
'description': self.description,
'kind': self.kind,
'rules': (self.rules if json_rules
else json.loads(self.rules))}
return d
@db_utils.retry_on_db_error
def add_policy(policy_dict, session=None):
session = session or db.get_session()
try:
with session.begin(subtransactions=True):
new_row = LibraryPolicy(
name=policy_dict['name'],
abbreviation=policy_dict['abbreviation'],
description=policy_dict['description'],
kind=policy_dict['kind'],
rules=json.dumps(policy_dict['rules']))
session.add(new_row)
return new_row
except oslo_db_exc.DBDuplicateEntry:
raise KeyError(
"Policy with name %s already exists" % policy_dict['name'])
@db_utils.retry_on_db_error
def replace_policy(id_, policy_dict, session=None):
session = session or db.get_session()
try:
with session.begin(subtransactions=True):
new_row = LibraryPolicy(
id=id_,
name=policy_dict['name'],
abbreviation=policy_dict['abbreviation'],
description=policy_dict['description'],
kind=policy_dict['kind'],
rules=json.dumps(policy_dict['rules']))
session.query(LibraryPolicy).filter(
LibraryPolicy.id == id_).one().update(
new_row.to_dict(include_rules=True, json_rules=True))
return new_row
except db_exc.NoResultFound:
raise KeyError('No policy found with policy id %s' % id_)
@db_utils.retry_on_db_error
def delete_policy(id_, session=None):
session = session or db.get_session()
return session.query(LibraryPolicy).filter(
LibraryPolicy.id == id_).delete()
@db_utils.retry_on_db_error
def delete_policies(session=None):
session = session or db.get_session()
return session.query(LibraryPolicy).delete()
@db_utils.retry_on_db_error
def get_policy(id_, session=None):
session = session or db.get_session()
try:
return session.query(LibraryPolicy).filter(
LibraryPolicy.id == id_).one()
except db_exc.NoResultFound:
raise KeyError('No policy found with policy id %s' % id_)
@db_utils.retry_on_db_error
def get_policy_by_name(name, session=None):
session = session or db.get_session()
try:
return session.query(LibraryPolicy).filter(
LibraryPolicy.name == name).one()
except db_exc.NoResultFound:
raise KeyError('No policy found with policy name %s' % name)
@db_utils.retry_on_db_error
def get_policies(session=None):
session = session or db.get_session()
return (session.query(LibraryPolicy).all())
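# Illustrative CRUD sketch (not part of the original module); the field
# values below are hypothetical:
#
#     row = add_policy({"name": "p1", "abbreviation": "p1",
#                       "description": "demo", "kind": "nonrecursive",
#                       "rules": []})
#     get_policy(row.id).to_dict()
#     delete_policy(row.id)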
| {
"content_hash": "bc104c947543274eb29a1f77ed02e4d4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 34.09836065573771,
"alnum_prop": 0.6139423076923077,
"repo_name": "openstack/congress",
"id": "8b6750a56b9c8ab92c799ce6cdb569f73c395e22",
"size": "4790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/db/db_library_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2614028"
},
{
"name": "Shell",
"bytes": "45786"
}
],
"symlink_target": ""
} |
class LandingPage(object):
BUTTON_LOGIN = "Login"
PROGRAM_INFO_TAB = "Program Info"
class PageHeader(object):
# dropdown toggle
PROPLE_LIST_WIDGET = "Admin Dashboard"
class LhnMenu(object):
# create new program
DATE_FORMATTING = "%d/%m/%Y"
OBJECT_REVIEW = "Object Review"
PRIVATE_PROGRAM = "Private Program"
DESCRIPTION = "Description"
NOTES = "Notes"
MANAGER = "Manager"
PROGRAM_URL = "Program URL"
STATE = "State"
PRIMARY_CONTACT = "Primary Contact"
SECONDARY_CONTACT = "Secondary Contact"
REFERENCE_URL = "Reference URL"
CODE = "Code"
EFFECTIVE_DATE = "Effective Date"
STOP_DATE = "Stop Date"
class WidgetBar(object):
# dropdown
CLAUSES = "Clauses"
CONTRACTS = "Contracts"
DATA_ASSETS = "Data Assets"
FACILITIES = "Facilities"
MARKETS = "Markets"
ORG_GROUPS = "Org Groups"
POLICIES = "Policies"
PROCESSES = "Processes"
PRODUCTS = "Products"
PROJECTS = "Projects"
STANDARDS = "Standards"
SYSTEMS = "Systems"
VENDORS = "Vendors"
THREAD_ACTORS = "Thread Actors"
RISKS = "Risks"
TASKS = "Tasks"
class WidgetProgramInfo(object):
SUBMIT_FOR_REVIEW = "Submit For Review"
# state in lhn_modal create new page
DRAFT = "Draft"
FINAL = "Final"
EFFECTIVE = "Effective"
INEFFECTIVE = "Ineffective"
LAUNCHED = "Launched"
NOT_LAUNCHED = "Not Launched"
IN_SCOPE = "In Scope"
NOT_IN_SCOPE = "Not in Scope"
DEPRECATED = "Deprecated"
# button settings dropdown elements
EDIT_PROGRAM = "Edit Program"
GET_PERMALINK = "Get permalink"
DELETE = "Delete"
BUTTON_SETTINGS_DROPDOWN_ITEMS = [EDIT_PROGRAM, GET_PERMALINK, DELETE]
ALERT_LINK_COPIED = "Link has been copied to your clipboard."
| {
"content_hash": "cdfeb19dbc6fa9d873498f5a5d7187f6",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 24.405797101449274,
"alnum_prop": 0.6876484560570071,
"repo_name": "jmakov/ggrc-core",
"id": "d19d57bee42de7cdc0c304805e9308456ec27027",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/selenium/src/lib/constants/element.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "205917"
},
{
"name": "Cucumber",
"bytes": "140389"
},
{
"name": "HTML",
"bytes": "1051437"
},
{
"name": "JavaScript",
"bytes": "1350860"
},
{
"name": "Makefile",
"bytes": "6114"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "2166612"
},
{
"name": "Ruby",
"bytes": "1508"
},
{
"name": "Shell",
"bytes": "36102"
}
],
"symlink_target": ""
} |
import time
from pulsar import HttpException, MethodNotAllowed, ImproperlyConfigured
from pulsar.apps.wsgi import Json
from lux import Parameter
from ..views import RestRouter, AuthenticationError
try:
import jwt
except ImportError:
jwt = None
from .. import AuthBackend
from ..forms import LoginForm
class Http401(HttpException):
def __init__(self, auth, msg=''):
headers = [('WWW-Authenticate', auth)]
super().__init__(msg=msg, status=401, headers=headers)
class TokenBackend(AuthBackend):
'''Backend based on JWT_
Requires pyjwt_ package.
.. _pyjwt: https://pypi.python.org/pypi/PyJWT
.. _JWT: http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html
'''
_config = [
Parameter('AUTHORIZATION_URL', '/authorizations',
'Url for authorizations',
True),
]
def on_config(self, app):
if not jwt:
raise ImproperlyConfigured('JWT library not available')
def api_sections(self, app):
yield Authorization(app.config['AUTHORIZATION_URL'])
def request(self, request):
'''Check for ``HTTP_AUTHORIZATION`` header and if it is available
and the authentication type if ``bearer`` try to perform
authentication using JWT_.
'''
auth = request.get('HTTP_AUTHORIZATION')
user = request.cache.user
if auth and user.is_anonymous():
auth_type, key = auth.split(None, 1)
auth_type = auth_type.lower()
if auth_type == 'bearer':
try:
data = jwt.decode(key, self.secret_key)
except jwt.ExpiredSignature:
request.app.logger.info('JWT token has expired')
# In this case we want the client to perform
# a new authentication. Raise 401
raise Http401('Token')
except Exception:
request.app.logger.exception('Could not load user')
else:
user = self.get_user(request, **data)
if user:
request.cache.user = user
def response(self, environ, response):
name = 'Access-Control-Allow-Origin'
if name not in response.headers:
origin = environ.get('HTTP_ORIGIN', '*')
response[name] = origin
return response
def response_middleware(self, app):
return [self.response]
def on_preflight(self, app, request):
'''Preflight handler
'''
headers = request.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS')
response = request.response
origin = request.get('HTTP_ORIGIN', '*')
if origin == 'null':
origin = '*'
response['Access-Control-Allow-Origin'] = origin
if headers:
response['Access-Control-Allow-Headers'] = headers
def create_token(self, request, user):
'''Create the token
'''
        payload = self.jwt_payload(request, user)
        # pass the request through so encode_payload can read the secret key
        return self.encode_payload(request, payload)
def jwt_payload(self, request, user):
expiry = self.session_expiry(request)
payload = {'user_id': user.id,
'superuser': user.is_superuser()}
if expiry:
payload['exp'] = int(time.mktime(expiry.timetuple()))
return payload
def encode_payload(self, request, payload):
return jwt.encode(payload, request.config['SECRET_KEY'])
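# Hedged sketch of the token round trip, assuming SECRET_KEY is set in the
# app config:
#
#     token = backend.create_token(request, user)
#     data = jwt.decode(token, request.config['SECRET_KEY'])
#     assert data['user_id'] == user.id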
class Authorization(RestRouter):
form = LoginForm
def post(self, request):
'''Create a new Authorization token
'''
user = request.cache.user
if user.is_authenticated():
raise MethodNotAllowed
form = self.form(request, data=request.body_data())
if form.is_valid():
auth_backend = request.cache.auth_backend
try:
user = auth_backend.authenticate(request, **form.cleaned_data)
auth_backend.login(request, user)
except AuthenticationError as e:
form.add_error_message(str(e))
else:
token = auth_backend.create_token(request, user)
token = token.decode('utf-8')
request.response.status_code = 201
return Json({'token': token}).http_response(request)
return Json(form.tojson()).http_response(request)
| {
"content_hash": "259d6a3444f663e9769327af68e5956e",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 32.42028985507246,
"alnum_prop": 0.5809119356280733,
"repo_name": "tazo90/lux",
"id": "18dbf6935c21c48b802f756891fce7e30f59ec50",
"size": "4474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/extensions/rest/backends/token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85029"
},
{
"name": "HTML",
"bytes": "17331"
},
{
"name": "JavaScript",
"bytes": "354892"
},
{
"name": "Python",
"bytes": "543161"
}
],
"symlink_target": ""
} |
"""This module contains Google Compute Engine links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
COMPUTE_BASE_LINK = "https://console.cloud.google.com/compute"
COMPUTE_LINK = (
COMPUTE_BASE_LINK + "/instancesDetail/zones/{location_id}/instances/{resource_id}?project={project_id}"
)
COMPUTE_TEMPLATE_LINK = COMPUTE_BASE_LINK + "/instanceTemplates/details/{resource_id}?project={project_id}"
COMPUTE_GROUP_MANAGER_LINK = (
COMPUTE_BASE_LINK + "/instanceGroups/details/{location_id}/{resource_id}?project={project_id}"
)
class ComputeInstanceDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance details Link"""
name = "Compute Instance details"
key = "compute_instance_details"
format_str = COMPUTE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceDetailsLink.key,
value={
"location_id": location_id,
"resource_id": resource_id,
"project_id": project_id,
},
)
class ComputeInstanceTemplateDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance Template details Link"""
name = "Compute Instance Template details"
key = "compute_instance_template_details"
format_str = COMPUTE_TEMPLATE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceTemplateDetailsLink.key,
value={
"resource_id": resource_id,
"project_id": project_id,
},
)
class ComputeInstanceGroupManagerDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance Group Manager details Link"""
name = "Compute Instance Group Manager"
key = "compute_instance_group_manager_details"
format_str = COMPUTE_GROUP_MANAGER_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceGroupManagerDetailsLink.key,
value={
"location_id": location_id,
"resource_id": resource_id,
"project_id": project_id,
},
)
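# Illustrative sketch (hypothetical operator code): persisting a link from an
# operator's execute() so it shows up on the task instance.
#
#     ComputeInstanceDetailsLink.persist(
#         context=context,
#         task_instance=self,
#         location_id="europe-west1-b",
#         resource_id="my-instance",
#         project_id="my-project",
#     )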
| {
"content_hash": "943449b2e6c5f0dbb01261a720d0efb7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 107,
"avg_line_length": 30.210526315789473,
"alnum_prop": 0.6327526132404181,
"repo_name": "apache/airflow",
"id": "c2f15b273004bdbd54f19e317d82dfad3a7482a6",
"size": "3657",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/links/compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""
gcd and lcm.
A fact that helped me -
If two numbers have no prime factors in common, the lowest common multiple will be equal to the product of the two numbers
Ref:
Comments:
1) Compute the Lowest Common Multiple (LCM) of A.
2) Compute the Greatest Common Divisor (GCD) of B.
If x is 'between' A and B, then it will be a multiple of LCM(A) and a divisor of GCD(B). So we can now check any value x without having to step through A and B.
3)If LCM(A) is not a factor of GCD(B), then no x can be 'between' A and B so we just print 0 and quit.
4) If LCM(A) is a factor of GCD(B), then the number of multiples of LCM(A) that are factors of GCD(B) is the same as the number of integer factors of GCD(B) / LCM(A). So the problem is now reduced to integer factorisation.
While the simple approach won't work for 50 digit numbers(but still fast enough for a 64 bit number if we stop at the squre root), there are a lot of really clever algorithms like the Quadratic Sieve and Lenstras' Elliptic Curve Method that are really fast. I'm not entirely sure how they work, but I have copy pasted code into my IDE and tried them out, it is indeed possible to factorise 50 digit numbers without timing out on HackerRank.
-8 | Add Comment Parent Permalink
Bad_Jim about 6 hours ago
This is more for the hypothetical scenario where the constraints are much higher and everything has to be done with maximum efficiency. If you simply want your 10 points, the easiest thing to do is ignore all these optimizations and write a brute force solution.
0 | Add Comment Parent Permalink
hacker_am1xh53a about 5 hours ago
For higher constraints it does not look very efficient, as calculating LCM(A) is very expensive, say, if A are up to 10^17, N and M up to 10^6, LCM(A) might be of 16 million digits, and calculating it straightforward way would cost about 10^12 operations.
Though it is possible to stop calculations when LCM become larger than the limit for Bi, it wouldrequire more accurate calculations, probably of higher precision. LCM calculation is better replaced with calculation of some GCD.
And in the end we might stop at cubic instead of square root if we check the remainder for primality, because we do not need exact factors, only their count and multiplicity.
0 | Add Comment Parent Permalink
Bad_Jim about 3 hours ago
LCM(A) probably isn't the issue. If it gets larger than GCD(B) then we just print zero and stop. And we can calculate the LCM like this:
LCM(y,z) = y*z / GCD(y,z)
So LCM is a little bit more complicated than GCD, but the calculation of GCD(B) necessarily deals with larger numbers. Though it is possible that B might have far less values than A.
I like the idea of going up to the cube root, but modern factorisation techniques are still much faster. However they aren't systematic searches like trial division in the sense that you can say all factors below the cube root have been found. You just keep finding factors and testing primality until you run out of composite numbers.
gcd and lcm.
A fact that helped me -
If two numbers have no prime factors in common, the lowest common multiple will be equal to the product of the two numbers
"""
import unittest
def common_factors_of_set(s, factors=set()):
factors = set(factors)
# find all factors for the smallest s
to_factor = min(s)
for i in range(1, to_factor + 1):
if to_factor % i == 0:
factors.add(i)
s.remove(to_factor)
for num in s:
to_remove = set()
for factor in factors:
if num % factor != 0:
to_remove.add(factor)
factors = factors.difference(to_remove)
return factors
def filter_by_factors(s, factors):
s = set(s)
to_delete = set()
for num in s:
for factor in factors:
if num % factor != 0:
to_delete.add(num)
break
return s.difference(to_delete)
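# Hedged sketch of the LCM/GCD reduction described in the module docstring;
# not used by the tests below, provided for illustration only.
def count_between(a, b):
    """Count integers that every element of a divides and that divide
    every element of b."""
    from math import gcd
    lcm_a = 1
    for x in a:
        lcm_a = lcm_a * x // gcd(lcm_a, x)
    gcd_b = 0
    for y in b:
        gcd_b = gcd(gcd_b, y)
    if gcd_b % lcm_a:
        return 0
    # count multiples of lcm_a that divide gcd_b
    return sum(1 for k in range(lcm_a, gcd_b + 1, lcm_a) if gcd_b % k == 0)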
class MyTestCases(unittest.TestCase):
def test_common_factors_of_set(self):
factors = common_factors_of_set([16, 32, 96])
print(filter_by_factors(factors, [4, 2]))
factors = common_factors_of_set([1])
print(filter_by_factors(factors, [3, 5, 11]))
factors = common_factors_of_set([3, 4])
print(filter_by_factors(factors, [6]))
| {
"content_hash": "46555c23f12446a9a78f10517d02ed9d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 440,
"avg_line_length": 56.473684210526315,
"alnum_prop": 0.7199440820130475,
"repo_name": "MFry/pyAlgoDataStructures",
"id": "3a0b04c7a8391c67c89afd0400de4679aee3ee04",
"size": "4292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hacker_rank/WoC_25/between_two_sets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243997"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
from launchkey import SDK_VERSION
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'setuptools >= 40.4.3',
'requests >= 2.5.1, < 3.0.0',
'python-dateutil >= 2.4.2, < 3.0.0',
'formencode >= 2.0.0, < 3.0.0',
'pyjwkest >= 1.3.2, < 2.0.0',
'pycryptodomex >= 3.4.12, < 4.0.0',
'urllib3 >=1.26.5, < 2.0.0',
'pytz'
]
setup(name='launchkey',
version=SDK_VERSION,
description='LaunchKey Python SDK',
long_description=README + '\n\n' + CHANGES + '\n',
long_description_content_type="text/x-rst",
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: System :: Systems Administration :: Authentication/Directory",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Natural Language :: English",
"Intended Audience :: Developers",
],
author='iovation',
url='https://github.com/iovation/launchkey-python',
keywords='launchkey security authentication iovation multifactor mfa 2fa biometric',
license='MIT',
py_modules=[
'launchkey',
],
packages=[
'launchkey',
'launchkey.clients',
'launchkey.entities',
'launchkey.entities.service',
'launchkey.exceptions',
'launchkey.factories',
'launchkey.transports',
'launchkey.utils'
],
zip_safe=False,
test_suite='tests',
install_requires=requires,
tests_require=[
'nose >= 1.3.0, < 2.0.0',
'nose-exclude >= 0.5.0, < 1.0.0',
'mock >= 2.0.0, < 3.0.0',
'ddt >= 1.1.1, < 2.0.0'
],
project_urls={
'Bug Reports': 'https://github.com/iovation/launchkey-python/issues',
'Documentation': 'https://docs.launchkey.com/service-sdk/python/sdk-v3/',
'Administration': 'https://admin.launchkey.com/',
},
)
| {
"content_hash": "10aaae16414b42111d0ceff34beb9ca9",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 90,
"avg_line_length": 35.2112676056338,
"alnum_prop": 0.562,
"repo_name": "LaunchKey/launchkey-python",
"id": "10d49d568d0b041216117109080d0a11d7119c6c",
"size": "2500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32510"
}
],
"symlink_target": ""
} |
"""A hook for modifying parameter values read from the WMT client."""
def execute(env):
env['model_output__opt_time_interval'] = env['_update_time_step']
| {
"content_hash": "ce8110953489fb50e66fe3fe3135e9ff",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 69,
"avg_line_length": 39.75,
"alnum_prop": 0.6981132075471698,
"repo_name": "csdms/wmt-metadata",
"id": "6692692dc9dd6512bc7b1cf937c662895913dcb0",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata/PyDeltaRCM/hooks/pre-stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72895"
}
],
"symlink_target": ""
} |
"""
byceps.blueprints.admin.shop.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ....util.framework.blueprint import create_blueprint
blueprint = create_blueprint('shop_admin', __name__)
| {
"content_hash": "7154f162d6a724776244e4410d2eb466",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 23.75,
"alnum_prop": 0.6596491228070176,
"repo_name": "m-ober/byceps",
"id": "9c565e84343f2292b985a3c5785e272535074045",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/blueprints/admin/shop/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
import sure
from django.test import TestCase
from tasks.const import STATUS_SUCCESS
from ..coverage import coverage_violation
from .base import get_content
class CoverageViolationCase(TestCase):
"""Coverage violation case"""
def test_result(self):
"""Test coverage violation result"""
data = {'raw': get_content('coverage.out')}
result = coverage_violation(data)
result['status'].should.be.equal(STATUS_SUCCESS)
result['plot']['cover'].should.be.equal(86)
result['success_percent'].should.be.equal(86)
def test_issue_1_out(self):
"""Test out of #1 issue"""
data = {'raw': get_content('coverage_issue_1.out')}
result = coverage_violation(data)
result['status'].should.be.equal(STATUS_SUCCESS)
result['plot']['cover'].should.be.equal(100)
| {
"content_hash": "3ee562bd29650aedc16945b51d2ad062",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 59,
"avg_line_length": 35.041666666666664,
"alnum_prop": 0.6563614744351962,
"repo_name": "nvbn/coviolations_web",
"id": "a2e13d042756b740ee1a2ff8e75745d92f9d7034",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "violations/tests/test_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6025"
},
{
"name": "CoffeeScript",
"bytes": "30912"
},
{
"name": "Puppet",
"bytes": "729"
},
{
"name": "Python",
"bytes": "330675"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import datetime
import math
import RPi.GPIO as GPIO
class TriggerService:
_gewAbstand = float
_timeLastBang = datetime.datetime
_startTime = datetime.datetime
_vMeanSinceLastBang = []
def __init__(self,GuiService,GpsImportService):
self.GuiService = GuiService
self.GpsImportService = GpsImportService
self._startTime = datetime.datetime.now()
self._timeLastBang = datetime.datetime.now() - self._startTime
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(3, GPIO.OUT)
GPIO.output(3, False)
self._gewAbstand = self.GuiService.getDesiredDistance()
def go(self):
self.trigger(self.GpsImportService.getVelocity())
def trigger(self,vMeanNew):
vGes = 0.0
self._vMeanSinceLastBang.append(vMeanNew)
for element in self._vMeanSinceLastBang:
vGes += element
vMean = vGes / len(self._vMeanSinceLastBang)
vergleich = datetime.datetime.now()-self._startTime
dTime = self.getTimeDiff(self._timeLastBang,vergleich)
GPIO.output(3, False)
if (dTime*vMean) >= self._gewAbstand:
self.giveSignal()
self._timeLastBang = datetime.datetime.now() -self._startTime
del self._vMeanSinceLastBang[:]
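    # Worked example for trigger() (hypothetical numbers): with a desired
    # spacing of 10 m and a mean speed of 2 m/s, dTime*vMean reaches 10 m
    # after 5 s, the GPIO pin fires, and the averaging window resets.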
def giveSignal(self):
GPIO.output(3, True)
def getTimeDiff(self,time1,time2):
if time1 >= time2:
dt = time1 - time2
else:
dt = time2 - time1
dtSec = dt.days*24*60*60 + dt.seconds + dt.microseconds/1000000
return dtSec
| {
"content_hash": "2e2439a77a725a82ad618972785f5fdb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 33.5,
"alnum_prop": 0.628731343283582,
"repo_name": "madzing/T-Box",
"id": "6d6cab3560579d73febaf4c50ad8d67baf2073a8",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Triggerservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25241"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
from .config import Config
from .pusher import Pusher
__all__ = [
'Config',
'Pusher',
]
| {
"content_hash": "23993cea0b8f19ab0369ba2abb651374",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 26,
"avg_line_length": 13.857142857142858,
"alnum_prop": 0.6082474226804123,
"repo_name": "pusher/pusher-python-rest",
"id": "9a313d7f1c2cac2897bfa5ed4492e34572351808",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pusher/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "119"
},
{
"name": "Python",
"bytes": "35039"
}
],
"symlink_target": ""
} |
from lxml import etree as et
arights = ('read-only', 'read-write', 'write-only', 'writeOnce', 'read-writeOnce')
__version__ = '1.0.1'
default_xml = ('<device><name>NEW_DEVICE</name>'
'<version>1.0</version>'
'<description>Default CMSIS device</description>'
'<addressUnitBits>8</addressUnitBits>'
'<width>32</width>'
'<size>0x20</size>'
'<access>read-write</access>'
'<resetValue>0x00000000</resetValue>'
'<resetMask>0xFFFFFFFF</resetMask>'
'<peripherals><peripheral>'
'<name>NEW_PERIPHERAL</name>'
'<groupName>DEVICE PERIPHERALS</groupName>'
'<baseAddress>0xDEADBEEF</baseAddress>'
'<addressBlock><offset>0x00</offset><size>0x400</size><usage>registers</usage></addressBlock>'
'<interrupt><name>NEW_INTERRUPT</name><description>Default interrupt</description><value>1</value></interrupt>'
'<registers><register>'
'<name>NEW_REGISTER</name><displayName>NEW_REGISTER</displayName>'
'<description>Default register</description>'
'<addressOffset>0x00</addressOffset>'
'<fields><field><name>NEW_BITFIELD</name><description>Default bitfield</description>'
'<bitOffset>0</bitOffset><bitWidth>1</bitWidth></field></fields>'
'</register></registers>'
'</peripheral></peripherals></device>'
)
def str_cleanup(s):
try:
        # decode back to str so the join below also works on Python 3
        s = s.encode('ascii', errors='ignore').decode('ascii')
return ' '.join(s.split())
except:
return None
def toInt(val, fault=None):
try:
return int(val, 0)
except:
return fault
def get_from_xml(node, attr):
try:
return node.find(attr).text
except:
return None
class basedata(object):
def __init__(self, parent=None):
self.parent = parent
self._name = 'new'
self._desc = None
self._rsize = None
self.rvalue = None
self._access = None
@property
def name(self):
return self._name
@name.setter
def name(self, val):
try:
            # decode back to str so the join below also works on Python 3
            s = val.encode('ascii', errors='ignore').decode('ascii')
self._name = '_'.join(s.split())
except:
pass
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, val):
self._desc = str_cleanup(val)
@property
def access(self):
return self._access
@access.setter
def access(self, val):
self._access = val if val in arights else None
@property
def rsize(self):
return '0x{0:02X}'.format(self._rsize) if self._rsize else None
@rsize.setter
def rsize(self, val):
self._rsize = toInt(val)
@property
def vsize(self):
if self._rsize:
return self.rsize
else:
if self.parent:
return self.parent.vsize
else:
return 0
@property
def vvalue(self):
if self.rvalue:
return self.rvalue
else:
if self.parent:
return self.parent.vvalue
else:
return None
@property
def vaccess(self):
if self.access:
return self.access
else:
if self.parent:
return self.parent.vaccess
else:
return 'undefined'
class field(basedata):
def __init__(self, parent, xml=None):
basedata.__init__(self, parent)
self._bitw = 1
self._bito = 0
if xml is not None:
self.fromXML(xml)
@property
def bitw(self):
return str(self._bitw)
@bitw.setter
def bitw(self, val):
self._bitw = toInt(val, self._bitw)
@property
def bito(self):
return str(self._bito)
@bito.setter
def bito(self, val):
self._bito = toInt(val, self._bito)
@property
def valid(self):
if self.name and self.desc and self.bito and self.bitw:
if (self._bito + self._bitw) <= int(self.vsize, 0):
return True
return False
def fromXML(self, node):
self.name = get_from_xml(node, 'name')
self.desc = get_from_xml(node, 'description')
self.bitw = get_from_xml(node, 'bitWidth')
self.bito = get_from_xml(node, 'bitOffset')
self.access = get_from_xml(node, 'access')
def toXML(self, node=None):
if node is None:
node = et.Element('field')
et.SubElement(node, 'name').text = self.name
if self.desc:
et.SubElement(node, 'description').text = self.desc
et.SubElement(node, 'bitOffset').text = self.bito
et.SubElement(node, 'bitWidth').text = self.bitw
if self.access:
et.SubElement(node, 'access').text = self.access
return node
class register(basedata):
def __init__(self, parent, xml=None):
basedata.__init__(self, parent)
self._dispname = None
self._offset = 0
self.fields = []
if xml is not None:
self.fromXML(xml)
@property
def dispname(self):
return self._dispname
@dispname.setter
def dispname(self, val):
self._dispname = str_cleanup(val)
@property
def offset(self):
return '0x{0:04X}'.format(self._offset)
@offset.setter
def offset(self, val):
self._offset = toInt(val, self._offset)
@property
def valid(self):
return (self.name and self.desc)
def fromXML(self, node):
del self.fields[:]
self.name = get_from_xml(node, 'name')
self.dispname = get_from_xml(node, 'displayName')
self.desc = get_from_xml(node, 'description')
self.offset = get_from_xml(node, 'addressOffset')
self.rsize = get_from_xml(node, 'size')
self.rvalue = get_from_xml(node, 'resetValue')
self.access = get_from_xml(node, 'access')
for x in node.findall('./fields/field'):
self.fields.append(field(self, x))
self.sortField()
def toXML(self, node=None):
if node is None:
node = et.Element('register')
et.SubElement(node, 'name').text = self.name
if self.dispname:
et.SubElement(node, 'displayName').text = self.dispname
if self.desc:
et.SubElement(node, 'description').text = self.desc
et.SubElement(node, 'addressOffset').text = self.offset
if self.rsize:
et.SubElement(node, 'size').text = self.rsize
if self.access:
et.SubElement(node, 'access').text = self.access
if self.rvalue:
et.SubElement(node, 'resetValue').text = self.rvalue
if self.fields:
f = et.SubElement(node, 'fields')
for x in self.fields:
x.toXML(et.SubElement(f, 'field'))
return node
def newField(self, name=''):
r = 0
for x in sorted(self.fields, key=lambda x: x._bito, reverse=False):
if r < x._bito:
break
r = x._bito + x._bitw
if r < int(self.vsize, 0):
f = field(self)
f._bito = r
if name:
f.name = name
return f
else:
return None
def addField(self, field):
field.parent = self
self.fields.append(field)
self.sortField()
def sortField(self):
self.fields.sort(key=lambda x: x._bito, reverse=True)
def delField(self, item):
self.fields.remove(item)
def validate(self, callback):
names = []
cap = int(self.vsize, 0)
ofs = 0
for x in sorted(self.fields, key=lambda x: x._bito):
if x.name in names:
if callback('Duplicated bitfield name %s in %s' % (x.name, self.name)):
return True
elif x._bito + x._bitw > cap:
if callback('Bitfield %s is out of bounds in %s' % (x.name, self.name)):
return True
elif ofs > x._bito:
if callback('Bitfields %s and %s overlapped in %s' % (x.name, names[-1], self.name)):
return True
elif x.vaccess == 'undefined':
if callback('Undefined access level for %s in %s' % (x.name, self.name)):
return True
else:
names.append(x.name)
ofs = x._bito + x._bitw
return False
class interrupt(basedata):
def __init__(self, parent, xml=None):
basedata.__init__(self, parent)
self._value = 0
if xml is not None:
self.fromXML(xml)
@property
def value(self):
return str(self._value)
@value.setter
def value(self, val):
self._value = toInt(val, self._value)
@property
def valid(self):
return (self.name and self.desc)
def fromXML(self, node):
self.name = get_from_xml(node, 'name')
self.desc = get_from_xml(node, 'description')
self.value = get_from_xml(node, 'value')
def toXML(self, node=None):
if node is None:
node = et.Element('interrupt')
et.SubElement(node, 'name').text = self.name
if self.desc:
et.SubElement(node, 'description').text = self.desc
et.SubElement(node, 'value').text = self.value
return node
class peripheral(basedata):
def __init__(self, parent, xml=None):
basedata.__init__(self, parent)
self.ref = None
self.group = None
self._address = 0
self._aoffset = 0
self._asize = 0x400
self.interrupts = []
self.registers = []
if xml is not None:
self.fromXML(xml)
@property
def asize(self):
return '0x{0:04X}'.format(self._asize)
@asize.setter
def asize(self, val):
self._asize = toInt(val, self._asize)
@property
def aoffset(self):
return '0x{0:08X}'.format(self._aoffset)
@aoffset.setter
def aoffset(self, val):
self._aoffset = toInt(val, self._aoffset)
@property
def address(self):
return '0x{0:08X}'.format(self._address)
@address.setter
def address(self, val):
self._address = toInt(val, self._address)
def fromXML(self, node):
del self.interrupts[:]
del self.registers[:]
self.name = get_from_xml(node, 'name')
if 'derivedFrom' in node.attrib:
ref = node.attrib['derivedFrom']
for x in self.parent.peripherals:
if x.name == ref:
self.ref = x
break
else:
self.ref = None
self.desc = get_from_xml(node, 'description')
self.group = get_from_xml(node, 'groupName')
self.address = get_from_xml(node, 'baseAddress')
self.aoffset = get_from_xml(node, './addressBlock/offset')
self.asize = get_from_xml(node, './addressBlock/size')
for x in node.findall('./interrupt'):
self.interrupts.append(interrupt(self, x))
for x in node.findall('./registers/register'):
self.registers.append(register(self, x))
self.registers.sort(key=lambda x: x._offset, reverse=False)
def toXML(self, node=None):
if node is None:
node = et.Element('peripheral')
if self.ref:
node.set('derivedFrom', self.ref.name)
et.SubElement(node, 'name').text = self.name
if self.group:
et.SubElement(node, 'groupName').text = self.group
if self.desc:
et.SubElement(node, 'description').text = self.desc
et.SubElement(node, 'baseAddress').text = self.address
a = et.SubElement(node, 'addressBlock')
et.SubElement(a, 'offset').text = self.aoffset
et.SubElement(a, 'size').text = self.asize
et.SubElement(a, 'usage').text = 'registers'
for x in self.interrupts:
x.toXML(et.SubElement(node, 'interrupt'))
if self.registers:
r = et.SubElement(node, 'registers')
for x in self.registers:
x.toXML(et.SubElement(r, 'register'))
return node
def newRegister(self, name=''):
o = 0
        sz = int(self.vsize, 0) // 8  # floor division keeps register offsets integral on Python 3
for x in sorted(self.registers, key=lambda x: x._offset, reverse=False):
if o < x._offset:
break
o = x._offset + sz
if o < self._asize:
r = register(self)
r._offset = o
if name:
r.name = name
return r
else:
return None
def setRef(self, ref):
if ref:
for x in self.parent.peripherals:
if x == self:
return False
if x.name == ref:
self.ref = x
return True
return False
else:
self.ref = None
return True
def addRegister(self, item):
item.parent = self
self.registers.append(item)
self.registers.sort(key=lambda x: x._offset, reverse=False)
def delRegister(self, item):
self.registers.remove(item)
def newInterrupt(self, name=''):
ni = interrupt(self)
if name:
ni.name = name
return ni
def addInterrupt(self, reg):
if not next((i for i in self.interrupts if i.value == reg.value), None):
self.interrupts.append(reg)
def delInterrupt(self, item):
self.interrupts.remove(item)
def validate(self, callback):
names = []
ofs = 0
for x in sorted(self.registers, key=lambda x: x._offset, reverse=False):
            rsize = int(x.vsize, 0) // 8  # floor division keeps register sizes integral on Python 3
if x.name in names:
if callback('Duplicated register name %s in %s' % (x.name, self.name)):
return True
elif x._offset < ofs:
                if callback('Registers %s and %s overlap in %s' % (x.name, names[-1], self.name)):
return True
elif x._offset + rsize > self._asize:
if callback('Register %s is out of bounds in %s' % (x.name, self.name)):
return True
elif x.vaccess == 'undefined':
if callback('Undefined access level for %s in %s' % (x.name, self.name)):
return True
else:
if x.validate(callback):
return True
names.append(x.name)
ofs = x._offset + rsize
return False
class device(basedata):
def __init__(self, xml=None):
basedata.__init__(self, None)
self.vendor = None
self.width = '32'
self.rsize = '0x20'
self.rvalue = '0x00000000'
self.rmask = '0xFFFFFFFF'
self.access = 'read-write'
self.peripherals = []
if xml is not None:
self.fromXML(xml)
def fromString(self, str):
xml = et.fromstring(str)
self.fromXML(xml)
def fromXML(self, node):
del self.peripherals[:]
self.vendor = get_from_xml(node, 'vendor')
self.name = get_from_xml(node, 'name')
self.desc = get_from_xml(node, 'description')
self.width = get_from_xml(node, 'width')
self.rsize = get_from_xml(node, 'size')
self.access = get_from_xml(node, 'access')
self.rvalue = get_from_xml(node, 'resetValue')
self.rmask = get_from_xml(node, 'resetMask')
for x in node.findall('./peripherals/peripheral'):
self.peripherals.append(peripheral(self, x))
def toXML(self, node=None):
if node is None:
node = et.Element('export_device')
if self.vendor:
et.SubElement(node, 'vendor').text = self.vendor
et.SubElement(node, 'name').text = self.name
et.SubElement(node, 'version').text = '1.0'
et.SubElement(node, 'description').text = self.desc
et.SubElement(node, 'addressUnitBits').text = '8'
et.SubElement(node, 'width').text = self.width
et.SubElement(node, 'size').text = self.rsize
et.SubElement(node, 'access').text = self.access
et.SubElement(node, 'resetValue').text = self.rvalue
et.SubElement(node, 'resetMask').text = self.rmask
p = et.SubElement(node, 'peripherals')
for per in self.peripherals:
per.toXML(et.SubElement(p, 'peripheral'))
return node
def newPeripheral(self, name=''):
p = peripheral(self)
p.name = name
return p
def delPeripheral(self, item):
self.peripherals.remove(item)
def addPeripheral(self, item):
item.parent = self
self.peripherals.append(item)
def movePeripheral(self, dest, item):
uindex = 1 + self.peripherals.index(dest)
iindex = self.peripherals.index(item)
if iindex != uindex:
self.peripherals.insert(uindex, self.peripherals.pop(iindex))
def validate(self, callback):
names = []
vectors = []
ofs = 0
for x in sorted(self.peripherals, key=lambda x: x._address + x._aoffset):
if x.name in names:
if callback('Duplicated peripheral name %s' % (x.name)):
return True
if ofs > x._address + x._aoffset:
                if callback('Peripherals %s and %s overlap' % (x.name, names[-1])):
return True
if x.validate(callback):
return True
names.append(x.name)
ofs = x._address + x._aoffset + x._asize
for i in x.interrupts:
if i.value in vectors:
if callback('Duplicated interrupt vector %s' % (i.name)):
return True
vectors.append(i.value)
return False
def load(self, name):
xml = et.parse(name)
self.fromXML(xml)
def save(self, name):
xs = 'http://www.w3.org/2001/XMLSchema-instance'
xml = et.Element('device', schemaVersion='1.1',
nsmap={'xs': xs},
attrib={'{' + xs + '}noNamespaceSchemaLocation': 'CMSIS-SVD_Schema_1_1.xsd'})
xml.addprevious(et.Comment('generated by SVD editor ' + __version__))
self.toXML(xml)
tree = et.ElementTree(xml)
tree.write(name, encoding='utf-8', xml_declaration=True, standalone=True, pretty_print=True)
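# --- Minimal usage sketch (not part of the original module). It assumes the
# imports and helpers defined earlier in this file (lxml.etree as `et`,
# `basedata`, `str_cleanup`, `toInt`); all names and addresses below are
# made up for illustration.
if __name__ == '__main__':
    def report(message):
        # Print every validation problem; returning False keeps validating.
        print(message)
        return False
    dev = device()
    dev.name = 'DEMO_MCU'
    dev.desc = 'Illustrative device built from the classes above'
    gpio = dev.newPeripheral('GPIOA')
    gpio.desc = 'General purpose I/O, port A'
    gpio.address = '0x40010800'
    dev.addPeripheral(gpio)
    if not dev.validate(report):
        dev.save('demo_mcu.svd')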
| {
"content_hash": "99861e5f5ff829fff9e66b1ab5f32576",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 126,
"avg_line_length": 31.68020304568528,
"alnum_prop": 0.5402980291619933,
"repo_name": "dmitrystu/svd_editor",
"id": "9a06046dc73b24afd976561fa6bdf866c0be87be",
"size": "18736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/svd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65402"
}
],
"symlink_target": ""
} |
import logging
import os
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
from pip._vendor.pep517.wrappers import Pep517HookCaller
logger = logging.getLogger(__name__)
def build_wheel_pep517(
name, # type: str
backend, # type: Pep517HookCaller
metadata_directory, # type: str
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one InstallRequirement using the PEP 517 build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
assert metadata_directory is not None
if build_options:
        # PEP 517 does not support --build-option
logger.error('Cannot build wheel for %s using PEP 517 when '
'--build-option is present', name)
return None
try:
logger.debug('Destination directory: %s', tempd)
runner = runner_with_spinner_message(
f'Building wheel for {name} (PEP 517)'
)
with backend.subprocess_runner(runner):
wheel_name = backend.build_wheel(
tempd,
metadata_directory=metadata_directory,
)
except Exception:
logger.error('Failed building wheel for %s', name)
return None
return os.path.join(tempd, wheel_name)
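# --- Illustrative driver sketch (assumptions, not pip's public API). A
# caller needs a pep517 backend plus a metadata directory prepared by
# prepare_metadata_for_build_wheel, roughly along these lines:
#
#   import tempfile
#   from pip._vendor.pep517.wrappers import Pep517HookCaller
#
#   backend = Pep517HookCaller('/path/to/src', 'setuptools.build_meta')
#   with tempfile.TemporaryDirectory() as tempd:
#       wheel_path = build_wheel_pep517(
#           name='example', backend=backend,
#           metadata_directory='/path/to/metadata',
#           build_options=[], tempd=tempd,
#       )  # path to the built wheel, or None on failure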
| {
"content_hash": "0af74dd07d64707d6382afa8565b78af",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 31.02127659574468,
"alnum_prop": 0.6419753086419753,
"repo_name": "nataddrho/DigiCue-USB",
"id": "d25f9c42f627a63a738c3bb8916c21d8e02a5c6e",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/src/venv/Lib/site-packages/pip/_internal/operations/build/wheel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "205084"
}
],
"symlink_target": ""
} |
from flask import Module, abort, jsonify, render_template
from doodie import pagerduty_apikey, pagerduty_subdomain
from doodie.lib.pagerduty import Pagerduty
frontend = Module(__name__)
@frontend.route("/incidents")
def quick():
return render_template("incidents.html")
@frontend.route("/incidents/count/", defaults={"status": "all"})
@frontend.route("/incidents/count/<string:status>")
def incidents_count(status):
p = Pagerduty(pagerduty_apikey, pagerduty_subdomain)
count = p.incident_count(status=status)
if count:
return jsonify(message=count)
else:
abort(500)
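# --- Illustrative requests (assumption: this module is registered on a
# running Flask app with valid Pagerduty credentials) ---
#   GET /incidents                  -> renders incidents.html
#   GET /incidents/count/           -> {"message": <count of all incidents>}
#   GET /incidents/count/triggered  -> {"message": <count filtered by status>}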
| {
"content_hash": "7ac6c51368ead8860961d4419b0e00f4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 28.761904761904763,
"alnum_prop": 0.7168874172185431,
"repo_name": "ohlol/doodie",
"id": "99ead386eb84d7d6e9e1174bf19a79e913e0e936",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doodie/views/frontend.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1290"
},
{
"name": "Python",
"bytes": "3209"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from .common import ObjectMeta, LocalObjectReference
from ..base import Model
from ..fields import Field, ListField, RequiredField
class ContainerPort(Model):
name = Field(six.text_type)
hostPort = Field(int)
containerPort = Field(int)
protocol = Field(six.text_type, "TCP")
class ObjectFieldSelector(Model):
apiVersion = Field(six.text_type)
fieldPath = RequiredField(six.text_type)
class ResourceFieldSelector(Model):
containerName = Field(six.text_type)
resource = RequiredField(six.text_type)
divisor = Field(six.text_type)
class ConfigMapKeySelector(Model):
name = Field(six.text_type)
key = RequiredField(six.text_type)
class SecretKeySelector(Model):
name = Field(six.text_type)
key = RequiredField(six.text_type)
class EnvVarSource(Model):
fieldRef = Field(ObjectFieldSelector)
resourceFieldRef = Field(ResourceFieldSelector)
configMapKeyRef = Field(ConfigMapKeySelector)
secretKeyRef = Field(SecretKeySelector)
class SecretEnvSource(Model):
name = Field(six.text_type)
optional = Field(bool)
class ConfigMapEnvSource(Model):
name = Field(six.text_type)
optional = Field(bool)
class EnvFromSource(Model):
configMapRef = Field(ConfigMapEnvSource)
secretRef = Field(SecretEnvSource)
class EnvVar(Model):
name = Field(six.text_type)
value = Field(six.text_type)
valueFrom = Field(EnvVarSource)
class ResourceRequirements(Model):
limits = Field(dict)
requests = Field(dict)
class VolumeMount(Model):
name = Field(six.text_type)
readOnly = Field(bool)
mountPath = Field(six.text_type)
class HTTPHeader(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class HTTPGetAction(Model):
path = Field(six.text_type)
port = Field(six.text_type, alt_type=int)
scheme = Field(six.text_type, "HTTP")
httpHeaders = ListField(HTTPHeader)
class TCPSocketAction(Model):
port = Field(six.text_type, alt_type=int)
class ExecAction(Model):
command = Field(list)
class Probe(Model):
httpGet = Field(HTTPGetAction)
tcpSocket = Field(TCPSocketAction)
_exec = Field(ExecAction)
initialDelaySeconds = Field(int, 5)
timeoutSeconds = Field(int)
successThreshold = Field(int)
failureThreshold = Field(int)
periodSeconds = Field(int)
class Handler(Model):
httpGet = Field(HTTPGetAction)
tcpSocket = Field(TCPSocketAction)
_exec = Field(ExecAction)
class Lifecycle(Model):
postStart = Field(Handler)
preStop = Field(Handler)
class Container(Model):
name = Field(six.text_type)
image = Field(six.text_type)
ports = ListField(ContainerPort)
env = ListField(EnvVar)
envFrom = ListField(EnvFromSource)
resources = Field(ResourceRequirements)
volumeMounts = ListField(VolumeMount)
lifecycle = Field(Lifecycle)
livenessProbe = Field(Probe)
readinessProbe = Field(Probe)
imagePullPolicy = Field(six.text_type, "IfNotPresent")
command = ListField(six.text_type)
args = ListField(six.text_type)
class SecretVolumeSource(Model):
secretName = Field(six.text_type)
optional = Field(bool)
defaultMode = Field(int)
class KeyToPath(Model):
key = RequiredField(six.text_type)
path = RequiredField(six.text_type)
class ConfigMapVolumeSource(Model):
name = Field(six.text_type)
optional = Field(bool)
defaultMode = Field(int)
class EmptyDirVolumeSource(Model):
medium = Field(six.text_type)
class NFSVolumeSource(Model):
path = Field(six.text_type)
readOnly = Field(bool)
server = Field(six.text_type)
class HostPathVolumeSource(Model):
path = Field(six.text_type)
class GCEPersistentDiskVolumeSource(Model):
fsType = Field(six.text_type)
partition = Field(int)
pdName = Field(six.text_type)
readOnly = Field(bool)
class AWSElasticBlockStoreVolumeSource(Model):
fsType = Field(six.text_type)
partition = Field(int)
readOnly = Field(bool)
volumeID = Field(six.text_type)
class Volume(Model):
name = Field(six.text_type)
awsElasticBlockStore = Field(AWSElasticBlockStoreVolumeSource)
configMap = Field(ConfigMapVolumeSource)
emptyDir = Field(EmptyDirVolumeSource)
gcePersistentDisk = Field(GCEPersistentDiskVolumeSource)
hostPath = Field(HostPathVolumeSource)
nfs = Field(NFSVolumeSource)
secret = Field(SecretVolumeSource)
class PodSpec(Model):
volumes = ListField(Volume)
containers = ListField(Container)
restartPolicy = Field(six.text_type, "Always")
terminationGracePeriodSeconds = Field(int)
activeDeadlineSeconds = Field(int)
dnsPolicy = Field(six.text_type, "ClusterFirst")
nodeName = Field(six.text_type)
nodeSelector = Field(dict)
selector = Field(dict)
serviceAccountName = Field(six.text_type, "default")
automountServiceAccountToken = Field(bool)
imagePullSecrets = ListField(LocalObjectReference)
initContainers = ListField(Container)
class PodTemplateSpec(Model):
metadata = Field(ObjectMeta)
spec = Field(PodSpec)
class Pod(Model):
class Meta:
list_url = "/api/v1/pods"
url_template = "/api/v1/namespaces/{namespace}/pods/{name}"
metadata = Field(ObjectMeta)
spec = Field(PodSpec)
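# --- Minimal usage sketch (not part of the library): builds an nginx Pod
# from the models above. Keyword construction mirrors the Field declarations
# in this module; the name, namespace and image values are made up.
if __name__ == "__main__":
    pod = Pod(
        metadata=ObjectMeta(name="example", namespace="default"),
        spec=PodSpec(containers=[
            Container(name="web", image="nginx:1.21",
                      ports=[ContainerPort(containerPort=80)])
        ]),
    )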
| {
"content_hash": "0b90199538ce40bd16fef79015403501",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 67,
"avg_line_length": 24.527522935779817,
"alnum_prop": 0.7106788853562745,
"repo_name": "fiaas/k8s",
"id": "d589fe8a7f4a3ecacb6736c5fcaa8c034e2ba833",
"size": "5978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k8s/models/pod.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "183501"
}
],
"symlink_target": ""
} |
from mxconsole.summary.summary import *
from mxconsole.summary.writer.writer import FileWriter
| {
"content_hash": "dab51e6d817142bd014f56ac075efa83",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 54,
"avg_line_length": 47.5,
"alnum_prop": 0.8526315789473684,
"repo_name": "bravomikekilo/mxconsole",
"id": "b6bfe274228f45ea7c9b937dc66aeb82ca6e6cad",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mxconsole/summary/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "27900"
},
{
"name": "CSS",
"bytes": "5107"
},
{
"name": "HTML",
"bytes": "584168"
},
{
"name": "JavaScript",
"bytes": "1734943"
},
{
"name": "Protocol Buffer",
"bytes": "71639"
},
{
"name": "Python",
"bytes": "981371"
},
{
"name": "Shell",
"bytes": "1566"
},
{
"name": "TypeScript",
"bytes": "786869"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_to_domain
short_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)
description:
- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.
- More information from the internal APIC class
I(fv:RsDomAtt) at U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob Mcgill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.
  The M(aci_tenant), M(aci_ap), M(aci_epg) and M(aci_domain) modules can be used for this.
options:
allow_useg:
description:
- Allows micro-segmentation.
- The APIC defaults new EPG to Domain bindings to use C(encap).
choices: [ encap, useg ]
default: encap
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
deploy_immediacy:
description:
- Determines when the policy is pushed to hardware Policy CAM.
- The APIC defaults new EPG to Domain bindings to C(lazy).
choices: [ immediate, lazy ]
default: lazy
domain:
description:
- Name of the physical or virtual domain being associated with the EPG.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ phys, vmm ]
aliases: [ type ]
encap:
description:
- The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.
- This acts as the secondary encap when using useg.
choices: [ range from 1 to 4096 ]
encap_mode:
description:
      - The encapsulation method to be used.
- The APIC defaults new EPG to Domain bindings to C(auto).
choices: [ auto, vlan, vxlan ]
default: auto
epg:
description:
- Name of the end point group.
aliases: [ epg_name ]
netflow:
description:
- Determines if netflow should be enabled.
      - The APIC defaults new EPG to Domain bindings to C(disabled).
choices: [ disabled, enabled ]
default: disabled
primary_encap:
description:
- Determines the primary VLAN ID when using useg.
choices: [ range from 1 to 4096 ]
resolution_immediacy:
description:
- Determines when the policies should be resolved and available.
- The APIC defaults new EPG to Domain bindings to C(lazy).
choices: [ immediate, lazy, pre-provision ]
default: lazy
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
vm_provider:
description:
- The VM platform for VMM Domains.
choices: [ microsoft, openstack, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r''' # '''
RETURN = r''' # '''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(microsoft="uni/vmmp-Microsoft/dom-", openstack="uni/vmmp-OpenStack/dom-", vmware="uni/vmmp-VMware/dom-")
def main():
argument_spec = aci_argument_spec
argument_spec.update(
allow_useg=dict(type='str', choices=['encap', 'useg']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
        deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']),
domain=dict(type='str', aliases=['domain_name', 'domain_profile']),
domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),
encap=dict(type='int'),
encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),
epg=dict(type='str', aliases=['name', 'epg_name']),
netflow=dict(type='str', choices=['disabled', 'enabled']),
primary_encap=dict(type='int'),
resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']),
vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['domain_type', 'vmm', ['vm_provider']],
['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
],
)
allow_useg = module.params['allow_useg']
deploy_immediacy = module.params['deploy_immediacy']
domain = module.params['domain']
domain_type = module.params['domain_type']
vm_provider = module.params['vm_provider']
encap = module.params['encap']
if encap is not None:
if encap in range(1, 4097):
encap = 'vlan-{}'.format(encap)
else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
encap_mode = module.params['encap_mode']
netflow = module.params['netflow']
primary_encap = module.params['primary_encap']
if primary_encap is not None:
if primary_encap in range(1, 4097):
primary_encap = 'vlan-{}'.format(primary_encap)
else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
resolution_immediacy = module.params['resolution_immediacy']
state = module.params['state']
if domain_type == 'phys' and vm_provider is not None:
module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'")
# Compile the full domain and add it to module.params for URL building
if domain_type == 'vmm':
module.params["epg_domain"] = VM_PROVIDER_MAPPING[vm_provider] + domain
elif domain_type is not None:
module.params["epg_domain"] = 'uni/phys-' + domain
aci = ACIModule(module)
aci.construct_url(root_class="tenant", subclass_1="ap", subclass_2="epg", subclass_3="epg_domain")
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='fvRsDomAtt',
class_config=dict(
classPref=allow_useg,
encap=encap,
encapMode=encap_mode,
instrImedcy=deploy_immediacy,
netflowPref=netflow,
primaryEncap=primary_encap,
resImedcy=resolution_immediacy,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='fvRsDomAtt')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Pop the epg_domain key that was added for URL building
module.params.pop("epg_domain")
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
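# --- Illustrative playbook task (the EXAMPLES block above is empty; the
# values below are made up, and the connection options come from the shared
# `aci` documentation fragment) ---
#
# - name: Bind an EPG to a VMM domain
#   aci_epg_to_domain:
#     hostname: apic.example.com
#     username: admin
#     password: SomeSecret
#     tenant: prod
#     ap: web_ap
#     epg: web_epg
#     domain: vmm_dom
#     domain_type: vmm
#     vm_provider: vmware
#     state: present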
| {
"content_hash": "fec44ccbd5e9a5c689054c3cbbf4740c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 146,
"avg_line_length": 36.81553398058252,
"alnum_prop": 0.6391086497890295,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "e7ad960c2aef70c4189e5b0f290cba2e68688baf",
"size": "7721",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/aci/aci_epg_to_domain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
from minicps.devices import PLC
from utils import *
import random
import time
import logging
import signal
import sys
SENSOR_ADDR = IP['lit101']
LIT101 = ('LIT101', 1)
class Lit101(PLC):
def sigint_handler(self, sig, frame):
print "I received a SIGINT!"
global reader
reader = 0
sys.exit(0)
def pre_loop(self, sleep=0.1):
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigint_handler)
def main_loop(self):
#print 'DEBUG: sensor enters main_loop'
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, filename='defense_replay_attack_5/replay_lit101.log')
count = 0
gaussian_noise_experiment = 1
noise_level = 0.03
attack_lower_limit = 50
random_duration = random.randint(5,20)
#random_duration = 10
attack_upper_limit = attack_lower_limit + random_duration
attack_upper_window = attack_lower_limit + 60
attack_active = 0
while True:
if count>=attack_lower_limit and count<=attack_upper_limit:
if count == attack_lower_limit:
self.level = float(self.get(LIT101))
attack_active = 1
else:
attack_active = 0
if attack_active==0:
self.level = float(self.get(LIT101))
if gaussian_noise_experiment == 1:
self.level = self.level + random.gauss(0, noise_level)
if self.level > 1.0:
self.level = 1.0
if self.level < 0:
self.level = 0.0
self.send(LIT101, self.level, SENSOR_ADDR)
logging.info('LIT101: %f', self.level)
logging.info('NORMAL')
time.sleep(PLC_PERIOD_SEC)
else:
self.send(LIT101, self.level, SENSOR_ADDR)
logging.info('LIT101: %f', self.level)
logging.info('ATTACK')
time.sleep(PLC_PERIOD_SEC)
if (count >= attack_upper_window):
attack_lower_limit = attack_upper_window
random_duration = random.randint(5,20)
attack_upper_limit = attack_lower_limit + random_duration
attack_upper_window = attack_lower_limit + 60
count = count+1
if __name__ == '__main__':
lit101 = Lit101(name='lit101',state=STATE,protocol=LIT101_PROTOCOL,memory=GENERIC_DATA,disk=GENERIC_DATA)
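# --- Timing sketch (illustrative): each 60-tick window holds one replay
# burst of random length 5..20 ticks. During a burst, the LIT101 value
# sampled at the burst start keeps being re-sent (replayed) instead of a
# fresh sensor read, e.g. with the initial window starting at tick 50:
#   tick:  ... 50 ........ 50+d ............ 110 ........ 110+d' ...
#   state: NORMAL | ATTACK | NORMAL         | ATTACK     | NORMAL ...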
| {
"content_hash": "d2c3906599d20c1c6ba7d7eb0d3c606e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 131,
"avg_line_length": 28.65753424657534,
"alnum_prop": 0.6854684512428298,
"repo_name": "afmurillo/ICS-SDN",
"id": "821cfe15d0fb2c88f23d60ca3af76b45823762a0",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paper-topo/replay_lit101.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "MATLAB",
"bytes": "1477611"
},
{
"name": "Makefile",
"bytes": "1445"
},
{
"name": "Python",
"bytes": "313665"
},
{
"name": "Roff",
"bytes": "32736"
},
{
"name": "Shell",
"bytes": "43606"
},
{
"name": "TeX",
"bytes": "208"
}
],
"symlink_target": ""
} |
from conduit.database import (Model, SurrogatePK, db,
reference_col, relationship)
from flask_jwt import current_identity
followers_assoc = db.Table("followers_assoc",
db.Column("follower", db.Integer, db.ForeignKey("userprofile.user_id")),
db.Column("followed_by", db.Integer, db.ForeignKey("userprofile.user_id")))
class UserProfile(Model, SurrogatePK):
__tablename__ = 'userprofile'
    # id is needed for the primary join; the id inherited from SurrogatePK
    # cannot be referenced here, so it is redeclared on this class
id = db.Column(db.Integer, primary_key=True)
user_id = reference_col('users', nullable=False)
user = relationship('User', backref=db.backref('profile', uselist=False))
follows = relationship('UserProfile',
secondary=followers_assoc,
primaryjoin=id == followers_assoc.c.follower,
secondaryjoin=id == followers_assoc.c.followed_by,
backref='followed_by',
lazy='dynamic')
def __init__(self, user, **kwargs):
db.Model.__init__(self, user=user, **kwargs)
def is_following(self, profile):
return bool(self.follows.filter(followers_assoc.c.followed_by == profile.id).count())
def follow(self, profile):
if self is not profile and not self.is_following(profile):
self.follows.append(profile)
return True
return False
def unfollow(self, profile):
if self is not profile and self.is_following(profile):
self.follows.remove(profile)
return True
return False
@property
def following(self):
if current_identity:
return current_identity.profile.is_following(self)
return False
@property
def username(self):
return self.user.username
@property
def bio(self):
return self.user.bio
@property
def image(self):
return self.user.image
@property
def email(self):
return self.user.email
| {
"content_hash": "7d06c5b015f992e467e9e69fb432e043",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 33.15873015873016,
"alnum_prop": 0.5969363331737674,
"repo_name": "mohamed-aziz/realworld-flask",
"id": "0bcdf0cd4cd7e16cffaa99605757e384cba6e67a",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conduit/profile/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48752"
}
],
"symlink_target": ""
} |
import os
import subprocess
import re
# Create a dictionary that maps number to the strand of the read
strand = {}
strand['1'] = 'F'
strand['2'] = 'R'
# ATACseq data
# Create dictionary mapping ATACseq Barcodes to Experimental Samples
ATACmap = {}
regex = re.compile('(P\d+)\s+(A\d)\s+(\wD)\s+(\d)')
with open('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/rawdata/ATACseq/README.ATACseq.txt') as f:
for line in f:
m = regex.search(line)
# m.group(1) = barcode
# m.group(2) = genotype
# m.group(3) = tissue
# m.group(4) = biological replicate
if m:
ATACmap[m.group(1)] = '%s_%s_Rep%s' %(m.group(2),m.group(3),m.group(4))
# Get files in folder and create symlinks with names based on ATACseq mapping dictionary
_,_,filenames = next(os.walk('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/rawdata/ATACseq'),(None, None, []))
regex = re.compile('Sample_[\w]+-[\w]+_4R009_L1_([\w\d]+)_R(\d).fq.gz')
for file in filenames:
if file.endswith('fq.gz'):
m = regex.search(file)
# m.group(1) = barcode
        # m.group(2) = strand
if not os.path.exists('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s' %ATACmap[m.group(1)][0:2]):
os.mkdir('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s' %ATACmap[m.group(1)][0:2])
if not os.path.exists('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/ATACseq' %ATACmap[m.group(1)][0:2]):
os.mkdir('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/ATACseq' %ATACmap[m.group(1)][0:2])
subprocess.call(['ln', '-s', '/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/rawdata/ATACseq/%s' %file, '/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/ATACseq/%s_%s.fq.gz' %(ATACmap[m.group(1)][0:2],ATACmap[m.group(1)],strand[m.group(2)])])
# DNAseq data
DNAmap = dict(zip(['ADL06','ADL09','ADL10','ADL14'],['A4','A5','A6','A7']))
# Get files in folder and create symlinks with names based on DNAseq mapping dictionary
_,_,filenames = next(os.walk('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/rawdata/DNAseq'),(None, None, []))
regex = re.compile('(ADL\d+)_(\d)_(\d)')
for file in filenames:
if file.endswith('fq.gz'):
m = regex.search(file)
# m.group(1) = barcode
# m.group(2) = replicate lane
# m.group(3) = strand
if not os.path.exists('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s' %DNAmap[m.group(1)]):
os.mkdir('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s' %DNAmap[m.group(1)])
if not os.path.exists('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/DNAseq' %DNAmap[m.group(1)]):
os.mkdir('/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/DNAseq' %DNAmap[m.group(1)])
subprocess.call(['ln', '-s', '/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/rawdata/DNAseq/%s' %file, '/bio/lillyl1/Classes/EE283/Bioinformatics_Course/data/od_rawdata/%s/DNAseq/%s_Rep%s_%s.fq.gz' %(DNAmap[m.group(1)],DNAmap[m.group(1)],m.group(2),strand[m.group(3)])])
| {
"content_hash": "54c176135eb554e1d382705e6fb1cc8f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 285,
"avg_line_length": 57.07142857142857,
"alnum_prop": 0.6614518147684606,
"repo_name": "matilian26/EE283",
"id": "c7318e3f35ca2aec28080791e22e2d83b6affbe3",
"size": "3196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bioinformatics_Course/scripts/01_organize_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1410"
},
{
"name": "Python",
"bytes": "10513"
},
{
"name": "R",
"bytes": "704"
},
{
"name": "Roff",
"bytes": "1148803"
},
{
"name": "Shell",
"bytes": "11637"
}
],
"symlink_target": ""
} |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""Class which generates the consolidated test cases result in console at the
end of Test Suite or Project Execution """
import os
from Framework.Utils import xml_Utils
from Framework.Utils.print_Utils import print_info
class ExecutionSummary():
"""Warrior execution summary class"""
def __init__(self, junit_file=None):
""" Constructor """
self.junit_file = junit_file
def project_summary(self, junit_file):
"""To get the project name, project status and it's location"""
tree = xml_Utils.get_tree_from_file(self.junit_file)
project_list = []
for names in tree.iter('testsuites'):
proj_detail = names.attrib
proj_name = proj_detail.get('name')
file_type = self.get_file_type(junit_file)
project_status = proj_detail.get('status')
proj_loc = []
for properties in tree.iter('property'):
project_details = properties.attrib
if project_details.get('name') == 'location':
proj_loc.append(project_details.get('value'))
proj_location = proj_loc[0]
project_list.append([file_type, proj_name, project_status, proj_location])
return project_list
def suite_summary(self, junit_file):
""" To get the name, status and location of both test suite and test case"""
tree = xml_Utils.get_tree_from_file(self.junit_file)
suite_tc_list = []
for values in tree.iter('testsuite'):
suite_detail = values.attrib
suite_name = suite_detail.get('name')
suite_status = suite_detail.get('status')
suite_location = suite_detail.get('suite_location')
suite_result_dir = suite_detail.get('resultsdir')
if suite_location is not None:
suite_tc_list.append(["Suites", suite_name, suite_status, suite_location])
#to add Setup results in suite summary
for value in tree.iter('Setup'):
setup_details = value.attrib
setup_status = setup_details.get('status')
setup_name = setup_details.get('name')+".xml"
setup_location = setup_details.get('testcasefile_path')
case_result_dir_with_tc_name = setup_details.get('resultsdir')
if case_result_dir_with_tc_name is not None:
case_result_dir = os.path.dirname(case_result_dir_with_tc_name)
# suite junit element will not have resultsdir attrib for case execution
if suite_result_dir is None or suite_result_dir == case_result_dir:
suite_tc_list.append(["Setup", setup_name, setup_status, setup_location])
for value in tree.iter('testcase'):
testcase_details = value.attrib
testcase_status = testcase_details.get('status')
testcase_name = testcase_details.get('name')+".xml"
testcase_location = testcase_details.get('testcasefile_path')
case_result_dir_with_tc_name = testcase_details.get('resultsdir')
if case_result_dir_with_tc_name is not None:
case_result_dir = os.path.dirname(case_result_dir_with_tc_name)
# suite junit element will not have resultsdir attrib for case execution
if suite_result_dir is None or suite_result_dir == case_result_dir:
suite_tc_list.append(["Testcase", testcase_name, testcase_status,
testcase_location])
#to add debug results in suite summary
for value in tree.iter('Debug'):
debug_details = value.attrib
debug_status = debug_details.get('status')
debug_name = debug_details.get('name')+".xml"
debug_location = debug_details.get('testcasefile_path')
case_result_dir_with_tc_name = debug_details.get('resultsdir')
if case_result_dir_with_tc_name is not None:
case_result_dir = os.path.dirname(case_result_dir_with_tc_name)
# suite junit element will not have resultsdir attrib for case execution
if suite_result_dir is None or suite_result_dir == case_result_dir:
suite_tc_list.append(["Debug", debug_name, debug_status, debug_location])
#to add Cleanup results in suite summary
for value in tree.iter('Cleanup'):
cleanup_details = value.attrib
cleanup_status = cleanup_details.get('status')
cleanup_name = cleanup_details.get('name')+".xml"
cleanup_location = cleanup_details.get('testcasefile_path')
case_result_dir_with_tc_name = cleanup_details.get('resultsdir')
if case_result_dir_with_tc_name is not None:
case_result_dir = os.path.dirname(case_result_dir_with_tc_name)
# suite junit element will not have resultsdir attrib for case execution
if suite_result_dir is None or suite_result_dir == case_result_dir:
suite_tc_list.append(["Cleanup", cleanup_name, cleanup_status, cleanup_location])
# suite_tc_list appends suites and test cases as per execution order
return suite_tc_list
def get_file_type(self, junit_file):
"""To get the file type which is given for execution"""
tree = xml_Utils.get_tree_from_file(self.junit_file)
for names in tree.iter('testsuites'):
file_detail = names.attrib
file_val = file_detail.get('name')
if file_val == "customProject_independant_testcase_execution":
file_type = "Suites"
else:
file_type = "Project"
return file_type
def print_result_in_console(self, junit_file):
"""To print the consolidated test cases result in console at the end of Test Case/Test
Suite/Project Execution"""
file_type = self.get_file_type(junit_file)
        # Format the execution summary; project_summary and suite_summary return list values
print_info("+++++++++++++++++++++++++++++++++++++++++++++++++ Execution Summary +++++++++++++++++++++++++++++++++++++++++++++++++")
print_info("{0:10}{1:50}{2:10}{3:50}".format('Type', 'Name', 'Status', 'Path'))
if file_type == "Project":
project_exec = self.project_summary(junit_file)
for proj in project_exec:
print_info(("{0:10}{1:50}{2:10}{3:30}"
.format(proj[0], proj[1], proj[2], proj[3])))
suite_tc_exec = self.suite_summary(junit_file)
for suite_tc in suite_tc_exec:
print_info(("{0:10}{1:50}{2:10}{3:30}"
.format(suite_tc[0], suite_tc[1], suite_tc[2], suite_tc[3])))
elif file_type == "Suites":
suite_tc_exec = self.suite_summary(junit_file)
for suite_tc in suite_tc_exec:
print_info(("{0:10}{1:50}{2:10}{3:30}"
.format(suite_tc[0], suite_tc[1], suite_tc[2], suite_tc[3])))
print_info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
| {
"content_hash": "f4876a10ef88b95eb3f307cebf7d7bb1",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 139,
"avg_line_length": 56.836879432624116,
"alnum_prop": 0.5786124282505615,
"repo_name": "warriorframework/warriorframework",
"id": "22ec2f98e4f1c92649405c20bdc8515bdced1d0f",
"size": "8014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warrior/WarriorCore/Classes/execution_summary_class.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226699"
},
{
"name": "HTML",
"bytes": "1971325"
},
{
"name": "JavaScript",
"bytes": "1488764"
},
{
"name": "Python",
"bytes": "4217003"
},
{
"name": "Shell",
"bytes": "914"
},
{
"name": "XSLT",
"bytes": "2391"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import supybot
import supybot.conf as conf
from supybot import commands
import supybot.utils as utils
from supybot.commands import *
import supybot.ircdb as ircdb
import supybot.irclib as irclib
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.utils.iter import ifilter
class Misc(callbacks.Plugin):
def __init__(self, irc):
self.__parent = super(Misc, self)
self.__parent.__init__(irc)
self.invalidCommands = ircutils.FloodQueue(60)
def callPrecedence(self, irc):
return ([cb for cb in irc.callbacks if cb is not self], [])
def invalidCommand(self, irc, msg, tokens):
assert not msg.repliedTo, 'repliedTo msg in Misc.invalidCommand.'
assert self is irc.callbacks[-1], 'Misc isn\'t last callback.'
self.log.debug('Misc.invalidCommand called (tokens %s)', tokens)
channel = msg.args[0]
        # Only bother with the invalidCommand flood handling if it's actually
# enabled
if conf.supybot.abuse.flood.command.invalid():
# First, we check for invalidCommand floods. This is rightfully done
# here since this will be the last invalidCommand called, and thus it
# will only be called if this is *truly* an invalid command.
maximum = conf.supybot.abuse.flood.command.invalid.maximum()
banmasker = conf.supybot.protocols.irc.banmask.makeBanmask
self.invalidCommands.enqueue(msg)
if self.invalidCommands.len(msg) > maximum and \
not ircdb.checkCapability(msg.prefix, 'owner'):
penalty = conf.supybot.abuse.flood.command.invalid.punishment()
banmask = banmasker(msg.prefix)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'invalid command flood.', banmask, penalty)
if tokens and tokens[0] == 'Error:':
self.log.warning('Apparent error loop with another Supybot '
'observed. Consider ignoring this bot '
'permanently.')
ircdb.ignores.add(banmask, time.time() + penalty)
if conf.supybot.abuse.flood.command.invalid.notify():
irc.reply('You\'ve given me %s invalid commands within '
'the last minute; I\'m now ignoring you for %s.' %
(maximum,
utils.timeElapsed(penalty, seconds=False)))
return
# Now, for normal handling.
if conf.get(conf.supybot.reply.whenNotCommand, channel):
if len(tokens) >= 2:
cb = irc.getCallback(tokens[0])
if cb:
plugin = cb.name()
irc.error(format('The %q plugin is loaded, but there is '
'no command named %q in it. Try "list '
'%s" to see the commands in the %q '
'plugin.', plugin, tokens[1],
plugin, plugin))
else:
irc.errorInvalid('command', tokens[0], repr=False)
else:
command = tokens and tokens[0] or ''
irc.errorInvalid('command', command, repr=False)
else:
if tokens:
# echo [] will get us an empty token set, but there's no need
# to log this in that case anyway, it being a nested command.
self.log.info('Not replying to %s, not a command.', tokens[0])
if irc.nested:
bracketConfig = conf.supybot.commands.nested.brackets
brackets = conf.get(bracketConfig, channel)
if brackets:
(left, right) = brackets
irc.reply(left + ' '.join(tokens) + right)
else:
pass # Let's just do nothing, I can't think of better.
def list(self, irc, msg, args, optlist, cb):
"""[--private] [<plugin>]
Lists the commands available in the given plugin. If no plugin is
given, lists the public plugins available. If --private is given,
lists the private plugins.
"""
private = False
for (option, argument) in optlist:
if option == 'private':
private = True
if not self.registryValue('listPrivatePlugins') and \
not ircdb.checkCapability(msg.prefix, 'owner'):
irc.errorNoCapability('owner')
if not cb:
def isPublic(cb):
name = cb.name()
return conf.supybot.plugins.get(name).public()
names = [cb.name() for cb in irc.callbacks
if (private and not isPublic(cb)) or
(not private and isPublic(cb))]
names.sort()
if names:
irc.reply(format('%L', names))
else:
if private:
irc.reply('There are no private plugins.')
else:
irc.reply('There are no public plugins.')
else:
commands = cb.listCommands()
if commands:
commands.sort()
irc.reply(format('%L', commands))
else:
irc.reply(format('That plugin exists, but has no commands. '
'This probably means that it has some '
'configuration variables that can be '
'changed in order to modify its behavior. '
'Try "config list supybot.plugins.%s" to see '
'what configuration variables it has.',
cb.name()))
list = wrap(list, [getopts({'private':''}), additional('plugin')])
def apropos(self, irc, msg, args, s):
"""<string>
Searches for <string> in the commands currently offered by the bot,
returning a list of the commands containing that string.
"""
commands = {}
L = []
for cb in irc.callbacks:
if isinstance(cb, callbacks.Plugin):
for command in cb.listCommands():
if s in command:
commands.setdefault(command, []).append(cb.name())
for (key, names) in commands.iteritems():
for name in names:
L.append('%s %s' % (name, key))
if L:
L.sort()
irc.reply(format('%L', L))
else:
irc.reply('No appropriate commands were found.')
apropos = wrap(apropos, ['lowered'])
def help(self, irc, msg, args, command):
"""[<plugin>] [<command>]
This command gives a useful description of what <command> does.
<plugin> is only necessary if the command is in more than one plugin.
"""
command = map(callbacks.canonicalName, command)
(maxL, cbs) = irc.findCallbacksForArgs(command)
if maxL == command:
if len(cbs) > 1:
names = sorted([cb.name() for cb in cbs])
irc.error(format('That command exists in the %L plugins. '
'Please specify exactly which plugin command '
'you want help with.', names))
else:
assert cbs, 'Odd, maxL == command, but no cbs.'
irc.reply(cbs[0].getCommandHelp(command, False))
else:
irc.error(format('There is no command %q.',
callbacks.formatCommand(command)))
help = wrap(help, [many('something')])
def version(self, irc, msg, args):
"""takes no arguments
Returns the version of the current bot.
"""
try:
newest = utils.web.getUrl('http://supybot.sf.net/version.txt')
            newest = ('The newest version available online is %s.' %
                      newest.strip())
except utils.web.Error, e:
self.log.info('Couldn\'t get website version: %s', e)
newest = 'I couldn\'t fetch the newest version ' \
'from the Supybot website.'
s = 'The current (running) version of this Supybot is %s. %s' % \
(conf.version, newest)
irc.reply(s)
version = wrap(thread(version))
def source(self, irc, msg, args):
"""takes no arguments
Returns a URL saying where to get Supybot.
"""
irc.reply('My source is at https://github.com/Supybot/Supybot')
source = wrap(source)
def more(self, irc, msg, args, nick):
"""[<nick>]
If the last command was truncated due to IRC message length
limitations, returns the next chunk of the result of the last command.
If <nick> is given, it takes the continuation of the last command from
<nick> instead of the person sending this message.
"""
userHostmask = msg.prefix.split('!', 1)[1]
if nick:
try:
(private, L) = irc._mores[nick]
if not private:
irc._mores[userHostmask] = L[:]
else:
irc.error('%s has no public mores.' % nick)
return
except KeyError:
irc.error('Sorry, I can\'t find any mores for %s' % nick)
return
try:
L = irc._mores[userHostmask]
chunk = L.pop()
if L:
chunk += format(' \x02(%n)\x0F', (len(L), 'more', 'message'))
irc.reply(chunk, True)
except KeyError:
irc.error('You haven\'t asked me a command; perhaps you want '
'to see someone else\'s more. To do so, call this '
'command with that person\'s nick.')
except IndexError:
irc.error('That\'s all, there is no more.')
more = wrap(more, [additional('seenNick')])
def _validLastMsg(self, msg):
return msg.prefix and \
msg.command == 'PRIVMSG' and \
ircutils.isChannel(msg.args[0])
def last(self, irc, msg, args, optlist):
"""[--{from,in,on,with,without,regexp} <value>] [--nolimit]
Returns the last message matching the given criteria. --from requires
a nick from whom the message came; --in requires a channel the message
was sent to; --on requires a network the message was sent on; --with
requires some string that had to be in the message; --regexp requires
a regular expression the message must match; --nolimit returns all
the messages that can be found. By default, the channel this command is
given in is searched.
"""
predicates = {}
nolimit = False
skipfirst = True
if ircutils.isChannel(msg.args[0]):
predicates['in'] = lambda m: ircutils.strEqual(m.args[0],
msg.args[0])
else:
skipfirst = False
for (option, arg) in optlist:
if option == 'from':
def f(m, arg=arg):
return ircutils.hostmaskPatternEqual(arg, m.nick)
predicates['from'] = f
elif option == 'in':
def f(m, arg=arg):
return ircutils.strEqual(m.args[0], arg)
predicates['in'] = f
if arg != msg.args[0]:
skipfirst = False
elif option == 'on':
def f(m, arg=arg):
return m.receivedOn == arg
predicates['on'] = f
elif option == 'with':
def f(m, arg=arg):
return arg.lower() in m.args[1].lower()
predicates.setdefault('with', []).append(f)
elif option == 'without':
def f(m, arg=arg):
return arg.lower() not in m.args[1].lower()
predicates.setdefault('without', []).append(f)
elif option == 'regexp':
def f(m, arg=arg):
if ircmsgs.isAction(m):
m1 = ircmsgs.unAction(m)
else:
m1 = m.args[1]
return regexp_wrapper(m1, reobj=arg, timeout=0.1,
plugin_name=self.name(),
fcn_name='last')
predicates.setdefault('regexp', []).append(f)
elif option == 'nolimit':
nolimit = True
iterable = ifilter(self._validLastMsg, reversed(irc.state.history))
if skipfirst:
# Drop the first message only if our current channel is the same as
# the channel we've been instructed to look at.
iterable.next()
predicates = list(utils.iter.flatten(predicates.itervalues()))
# Make sure the user can't get messages from channels they aren't in
def userInChannel(m):
return m.args[0] in irc.state.channels \
and msg.nick in irc.state.channels[m.args[0]].users
predicates.append(userInChannel)
# Make sure the user can't get messages from a +s channel unless
# they're calling the command from that channel or from a query
def notSecretMsg(m):
return not irc.isChannel(msg.args[0]) \
or msg.args[0] == m.args[0] \
or (m.args[0] in irc.state.channels \
and 's' not in irc.state.channels[m.args[0]].modes)
predicates.append(notSecretMsg)
resp = []
if irc.nested and not \
self.registryValue('last.nested.includeTimestamp'):
tsf = None
else:
tsf = self.registryValue('timestampFormat')
if irc.nested and not self.registryValue('last.nested.includeNick'):
showNick = False
else:
showNick = True
for m in iterable:
for predicate in predicates:
if not predicate(m):
break
else:
if nolimit:
resp.append(ircmsgs.prettyPrint(m,
timestampFormat=tsf,
showNick=showNick))
else:
irc.reply(ircmsgs.prettyPrint(m,
timestampFormat=tsf,
showNick=showNick))
return
if not resp:
irc.error('I couldn\'t find a message matching that criteria in '
'my history of %s messages.' % len(irc.state.history))
else:
irc.reply(format('%L', resp))
last = wrap(last, [getopts({'nolimit': '',
'on': 'something',
'with': 'something',
'from': 'something',
'without': 'something',
'in': 'callerInGivenChannel',
'regexp': 'regexpMatcher',})])
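    # Illustrative invocations of @last on IRC (channel/nick values are
    # made up):
    #   last --from somenick --in #channel --with foo
    #   last --regexp m/error/i --nolimit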
def tell(self, irc, msg, args, target, text):
"""<nick> <text>
Tells the <nick> whatever <text> is. Use nested commands to your
benefit here.
"""
if irc.nested:
irc.error('This command cannot be nested.', Raise=True)
if target.lower() == 'me':
target = msg.nick
if ircutils.isChannel(target):
irc.error('Dude, just give the command. No need for the tell.')
return
if not ircutils.isNick(target):
irc.errorInvalid('nick', target)
if ircutils.nickEqual(target, irc.nick):
            irc.error('You just told me, why should I tell myself?', Raise=True)
if target not in irc.state.nicksToHostmasks and \
not ircdb.checkCapability(msg.prefix, 'owner'):
# We'll let owners do this.
s = 'I haven\'t seen %s, I\'ll let you do the telling.' % target
irc.error(s, Raise=True)
if irc.action:
irc.action = False
text = '* %s %s' % (irc.nick, text)
s = '%s wants me to tell you: %s' % (msg.nick, text)
irc.replySuccess()
irc.reply(s, to=target, private=True)
tell = wrap(tell, ['something', 'text'])
def ping(self, irc, msg, args):
"""takes no arguments
Checks to see if the bot is alive.
"""
irc.reply('pong', prefixNick=False)
Class = Misc
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "a8288ec7df20bb29e1d46fe84c91024c",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 81,
"avg_line_length": 43.4030612244898,
"alnum_prop": 0.5143999059597978,
"repo_name": "buildbot/supybot",
"id": "c4c93e15a8502c4183056aeb02378e2d6f446085",
"size": "18637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/Misc/plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2026939"
}
],
"symlink_target": ""
} |
#<ImportSpecificModules>
from ShareYourSystem.Standards.Classors.Representer import _print
from ShareYourSystem.Standards.Objects import Applyier
#</ImportSpecificModules>
#Print a version of the class
_print(dict(Applyier.ApplyierClass.__dict__.items()))
#Print a version of this object
_print(Applyier.ApplyierClass())
#Print a version of his __dict__
_print(Applyier.ApplyierClass().__dict__)
#Test
_print(Applyier.attest_apply(),**{'RepresentingAlineaIsBool':False}) | {
"content_hash": "adf6395d204918082d59a78604f2f5f4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.779510022271715,
"repo_name": "Ledoux/ShareYourSystem",
"id": "33050d44061579a1b46e022dbcc7fdea39aab21a",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/draft/Applyier/Test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
import os
import pep8
import testtools
import sst
import sst.selftests
class Pep8ConformanceTestCase(testtools.TestCase):
packages = [sst, sst.scripts, sst.tests, sst.selftests]
def test_pep8_conformance(self):
pep8style = pep8.StyleGuide(show_source=True)
for package in self.packages:
            package_dir = os.path.dirname(package.__file__)
            pep8style.input_dir(package_dir)
self.assertEqual(pep8style.options.report.total_errors, 0)
| {
"content_hash": "1c0a667a212e4e53a2db7ac50051e297",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.6955602536997886,
"repo_name": "DramaFever/sst",
"id": "418773c71189765b9b3154a321474e4b8eee4f80",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sst/tests/test_code_format.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2786"
},
{
"name": "CSS",
"bytes": "269"
},
{
"name": "HTML",
"bytes": "17795"
},
{
"name": "Python",
"bytes": "302776"
},
{
"name": "Shell",
"bytes": "6047"
}
],
"symlink_target": ""
} |
import requests
class Google(object):
def __init__(self):
pass
def get_homepage_html(self):
response = requests.get('https://www.google.com')
print response.text
| {
"content_hash": "9a76fd51b943a8ec90c5d0274e5b8279",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 16,
"alnum_prop": 0.6875,
"repo_name": "tjlee0909/tj-testing-python-packaging",
"id": "097528f5c4d362b26e2d0770e41ac6fc7bafc301",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tjtestingpythonpackaging/google.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""This module detects whether third-party libraries, utilized by third-party
drivers, are present on the system. If they are not, it mocks them and tinkers
with sys.modules so that the drivers can be loaded by unit tests, and the unit
tests can continue to test the functionality of those drivers without the
respective external libraries' actually being present.
Any external library required by a third-party driver should be mocked here.
Current list of mocked libraries:
- proliantutils
- pysnmp
- scciclient
- oneview_client
- pywsman
- python-dracclient
"""
import sys
import mock
from oslo_utils import importutils
import six
from ironic.drivers.modules import ipmitool
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
# IPMITool driver checks the system for presence of 'ipmitool' binary during
# __init__. We bypass that check in order to run the unit tests, which do not
# depend on 'ipmitool' being on the system.
ipmitool.TIMING_SUPPORT = False
ipmitool.DUAL_BRIDGE_SUPPORT = False
ipmitool.SINGLE_BRIDGE_SUPPORT = False
proliantutils = importutils.try_import('proliantutils')
if not proliantutils:
proliantutils = mock.MagicMock(spec_set=mock_specs.PROLIANTUTILS_SPEC)
sys.modules['proliantutils'] = proliantutils
sys.modules['proliantutils.ilo'] = proliantutils.ilo
sys.modules['proliantutils.ilo.client'] = proliantutils.ilo.client
sys.modules['proliantutils.exception'] = proliantutils.exception
sys.modules['proliantutils.utils'] = proliantutils.utils
proliantutils.utils.process_firmware_image = mock.MagicMock()
proliantutils.exception.IloError = type('IloError', (Exception,), {})
command_exception = type('IloCommandNotSupportedError', (Exception,), {})
proliantutils.exception.IloCommandNotSupportedError = command_exception
proliantutils.exception.InvalidInputError = type(
'InvalidInputError', (Exception,), {})
proliantutils.exception.ImageExtractionFailed = type(
'ImageExtractionFailed', (Exception,), {})
if 'ironic.drivers.ilo' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.ilo'])
oneview_client = importutils.try_import('oneview_client')
if not oneview_client:
oneview_client = mock.MagicMock(spec_set=mock_specs.ONEVIEWCLIENT_SPEC)
sys.modules['oneview_client'] = oneview_client
sys.modules['oneview_client.client'] = oneview_client.client
states = mock.MagicMock(
spec_set=mock_specs.ONEVIEWCLIENT_STATES_SPEC,
ONEVIEW_POWER_OFF='Off',
ONEVIEW_POWERING_OFF='PoweringOff',
ONEVIEW_POWER_ON='On',
ONEVIEW_POWERING_ON='PoweringOn',
ONEVIEW_RESETTING='Resetting',
ONEVIEW_ERROR='error')
sys.modules['oneview_client.states'] = states
sys.modules['oneview_client.exceptions'] = oneview_client.exceptions
sys.modules['oneview_client.utils'] = oneview_client.utils
oneview_client.exceptions.OneViewException = type('OneViewException',
(Exception,), {})
sys.modules['oneview_client.models'] = oneview_client.models
oneview_client_module = importutils.try_import('oneview_client.client')
# NOTE(vdrok): Always mock the oneview client, as it tries to establish
# connection to oneview right in __init__, and stevedore does not seem to care
# about mocks when it loads a module in mock_the_extension_manager
sys.modules['oneview_client.client'].Client = mock.MagicMock(
spec_set=mock_specs.ONEVIEWCLIENT_CLIENT_CLS_SPEC
)
if 'ironic.drivers.oneview' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.oneview'])
# attempt to load the external 'python-dracclient' library, which is required
# by the optional drivers.modules.drac module
dracclient = importutils.try_import('dracclient')
if not dracclient:
dracclient = mock.MagicMock(spec_set=mock_specs.DRACCLIENT_SPEC)
dracclient.client = mock.MagicMock(
spec_set=mock_specs.DRACCLIENT_CLIENT_MOD_SPEC)
dracclient.constants = mock.MagicMock(
spec_set=mock_specs.DRACCLIENT_CONSTANTS_MOD_SPEC,
POWER_OFF=mock.sentinel.POWER_OFF,
POWER_ON=mock.sentinel.POWER_ON,
REBOOT=mock.sentinel.REBOOT)
sys.modules['dracclient'] = dracclient
sys.modules['dracclient.client'] = dracclient.client
sys.modules['dracclient.constants'] = dracclient.constants
sys.modules['dracclient.exceptions'] = dracclient.exceptions
dracclient.exceptions.BaseClientException = type('BaseClientException',
(Exception,), {})
# Now that the external library has been mocked, if anything had already
# loaded any of the drivers, reload them.
if 'ironic.drivers.modules.drac' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.drac'])
# attempt to load the external 'pysnmp' library, which is required by
# the optional drivers.modules.snmp module
pysnmp = importutils.try_import("pysnmp")
if not pysnmp:
pysnmp = mock.MagicMock(spec_set=mock_specs.PYWSNMP_SPEC)
sys.modules["pysnmp"] = pysnmp
sys.modules["pysnmp.entity"] = pysnmp.entity
sys.modules["pysnmp.entity.rfc3413"] = pysnmp.entity.rfc3413
sys.modules["pysnmp.entity.rfc3413.oneliner"] = (
pysnmp.entity.rfc3413.oneliner)
sys.modules["pysnmp.entity.rfc3413.oneliner.cmdgen"] = (
pysnmp.entity.rfc3413.oneliner.cmdgen)
sys.modules["pysnmp.error"] = pysnmp.error
pysnmp.error.PySnmpError = Exception
sys.modules["pysnmp.proto"] = pysnmp.proto
sys.modules["pysnmp.proto.rfc1902"] = pysnmp.proto.rfc1902
# Patch the RFC1902 integer class with a python int
pysnmp.proto.rfc1902.Integer = int
# if anything has loaded the snmp driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.snmp' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.snmp'])
# attempt to load the external 'scciclient' library, which is required by
# the optional drivers.modules.irmc module
scciclient = importutils.try_import('scciclient')
if not scciclient:
mock_scciclient = mock.MagicMock(spec_set=mock_specs.SCCICLIENT_SPEC)
sys.modules['scciclient'] = mock_scciclient
sys.modules['scciclient.irmc'] = mock_scciclient.irmc
sys.modules['scciclient.irmc.scci'] = mock.MagicMock(
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC,
POWER_OFF=mock.sentinel.POWER_OFF,
POWER_ON=mock.sentinel.POWER_ON,
POWER_RESET=mock.sentinel.POWER_RESET,
MOUNT_CD=mock.sentinel.MOUNT_CD,
UNMOUNT_CD=mock.sentinel.UNMOUNT_CD,
MOUNT_FD=mock.sentinel.MOUNT_FD,
UNMOUNT_FD=mock.sentinel.UNMOUNT_FD)
# if anything has loaded the iRMC driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.irmc' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.irmc'])
# install mock object to prevent 'iscsi_irmc' and 'agent_irmc' from
# checking whether NFS/CIFS share file system is mounted or not.
irmc_boot = importutils.import_module(
'ironic.drivers.modules.irmc.boot')
irmc_boot.check_share_fs_mounted_orig = irmc_boot.check_share_fs_mounted
irmc_boot.check_share_fs_mounted_patcher = mock.patch(
'ironic.drivers.modules.irmc.boot.check_share_fs_mounted')
irmc_boot.check_share_fs_mounted_patcher.return_value = None
ironic_inspector_client = importutils.try_import('ironic_inspector_client')
if not ironic_inspector_client:
ironic_inspector_client = mock.MagicMock(
spec_set=mock_specs.IRONIC_INSPECTOR_CLIENT_SPEC)
ironic_inspector_client.ClientV1 = mock_specs.InspectorClientV1Specs
sys.modules['ironic_inspector_client'] = ironic_inspector_client
if 'ironic.drivers.modules.inspector' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.inspector'])
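# UcsSdk's exception classes accept keyword arguments; this subclass records
# them so tests can assert on the kwargs a driver passed when raising.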
class MockKwargsException(Exception):
def __init__(self, *args, **kwargs):
super(MockKwargsException, self).__init__(*args)
self.kwargs = kwargs
ucssdk = importutils.try_import('UcsSdk')
if not ucssdk:
ucssdk = mock.MagicMock()
sys.modules['UcsSdk'] = ucssdk
sys.modules['UcsSdk.utils'] = ucssdk.utils
sys.modules['UcsSdk.utils.power'] = ucssdk.utils.power
sys.modules['UcsSdk.utils.management'] = ucssdk.utils.management
sys.modules['UcsSdk.utils.exception'] = ucssdk.utils.exception
ucssdk.utils.exception.UcsOperationError = (
type('UcsOperationError', (MockKwargsException,), {}))
ucssdk.utils.exception.UcsConnectionError = (
type('UcsConnectionError', (MockKwargsException,), {}))
if 'ironic.drivers.modules.ucs' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.ucs'])
imcsdk = importutils.try_import('ImcSdk')
if not imcsdk:
imcsdk = mock.MagicMock()
imcsdk.ImcException = Exception
sys.modules['ImcSdk'] = imcsdk
if 'ironic.drivers.modules.cimc' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.cimc'])
| {
"content_hash": "4c1b04077dcc686bd819273cfe4e3a40",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 78,
"avg_line_length": 43.614285714285714,
"alnum_prop": 0.7252975215634895,
"repo_name": "NaohiroTamura/ironic",
"id": "7d3934cdefd93e1ad139e6bcd8110f6f218a79e7",
"size": "9816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/drivers/third_party_driver_mocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5077786"
},
{
"name": "Shell",
"bytes": "107935"
}
],
"symlink_target": ""
} |
import os
import sys
import re
import logging
import traceback
from typing import Optional
from PyQt5 import QtWidgets, QtCore
from PyQt5 import QtPrintSupport
import sas.qtgui.Utilities.GuiUtils as GuiUtils
import sas.qtgui.Utilities.ObjectLibrary as ObjectLibrary
from sas.qtgui.Utilities.Reports.UI.ReportDialogUI import Ui_ReportDialogUI
from sas.qtgui.Utilities.Reports.reportdata import ReportData
class ReportDialog(QtWidgets.QDialog, Ui_ReportDialogUI):
"""
    Class for stateless grid-like printout of model parameters for multiple models
"""
def __init__(self, report_data: ReportData, parent: Optional[QtCore.QObject]=None):
super().__init__(parent)
self.setupUi(self)
# disable the context help icon
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.report_data = report_data
#self.save_location = None
#if 'ReportDialog_directory' in ObjectLibrary.listObjects():
self.save_location = ObjectLibrary.getObject('ReportDialog_directory')
# Fill in the table from input data
self.setupDialog(self.report_data.html)
# Command buttons
self.cmdPrint.clicked.connect(self.onPrint)
self.cmdSave.clicked.connect(self.onSave)
def setupDialog(self, output=None):
"""
Display the HTML content in the browser.
"""
if output is not None:
self.txtBrowser.setHtml(output)
def onPrint(self):
"""
Display the print dialog and send the report to printer
"""
# Define the printer
printer = QtPrintSupport.QPrinter()
# Display the print dialog
dialog = QtPrintSupport.QPrintDialog(printer)
dialog.setModal(True)
dialog.setWindowTitle("Print")
if dialog.exec_() != QtWidgets.QDialog.Accepted:
return
document = self.txtBrowser.document()
try:
# pylint chokes on this line with syntax-error
# pylint: disable=syntax-error doesn't seem to help
document.print(printer)
except Exception as ex:
# Printing can return various exceptions, let's catch them all
logging.error("Print report failed with: " + str(ex))
def onSave(self):
"""
Display the Save As... prompt and save the report if instructed so
"""
# Choose user's home directory
if self.save_location is None:
location = os.path.expanduser('~')
else:
location = self.save_location
# Use a sensible filename default
default_name = os.path.join(str(location), 'report.pdf')
kwargs = {
'parent' : self,
'caption' : 'Save Report',
# don't use 'directory' in order to remember the previous user choice
'directory': default_name,
'filter' : 'PDF file (*.pdf);;HTML file (*.html);;Text file (*.txt)',
'options' : QtWidgets.QFileDialog.DontUseNativeDialog}
# Query user for filename.
filename_tuple = QtWidgets.QFileDialog.getSaveFileName(**kwargs)
filename = filename_tuple[0]
if not filename:
return
extension = filename_tuple[1]
self.save_location = os.path.dirname(filename)
# lifetime of this widget is short - keep the reference elsewhere
ObjectLibrary.addObject('ReportDialog_directory', self.save_location)
try:
# extract extension from filter
# e.g. "PDF file (*.pdf)" -> ".pdf"
ext = extension[extension.find("(")+2:extension.find(")")]
except IndexError as ex:
# (ext) not found...
logging.error("Error while saving report. " + str(ex))
return
basename, extension = os.path.splitext(filename)
if not extension:
filename = '.'.join((filename, ext))
if ext.lower() == ".txt":
self.write_string(self.report_data.text, filename)
elif ext.lower() == ".html":
self.write_string(self.report_data.html, filename)
elif ext.lower() == ".pdf":
html_utf = GuiUtils.replaceHTMLwithUTF8(self.report_data.html)
self.save_pdf(html_utf, filename)
else:
logging.error(f"Unknown file extension: {ext.lower()}")
@staticmethod
def write_string(string, filename):
"""
Write string to file
"""
with open(filename, 'w') as f:
f.write(string)
@staticmethod
def save_pdf(data, filename):
"""
Create a PDF file from html source string.
        Returns a falsy value when the PDF was created successfully, a truthy error count otherwise.
: data: html string
: filename: name of file to be saved
"""
# import moved from top due to cost
from xhtml2pdf import pisa
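        # pisa.CreatePDF renders the HTML into `dest`; its status object
        # exposes .err, an error count that is 0 when rendering succeeded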
try:
# open output file for writing (truncated binary)
with open(filename, "w+b") as resultFile:
# convert HTML to PDF
pisaStatus = pisa.CreatePDF(data.encode("UTF-8"),
dest=resultFile,
encoding='UTF-8')
return pisaStatus.err
except Exception as ex:
# logging.error("Error creating pdf: " + str(ex))
logging.error("Error creating pdf: " + traceback.format_exc())
return False
| {
"content_hash": "51468923760edcc0424afe06a0581048",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 88,
"avg_line_length": 34.40625,
"alnum_prop": 0.599636693914623,
"repo_name": "SasView/sasview",
"id": "de64d606403dd9efda36a7b85b608f48b3469695",
"size": "5505",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/sas/qtgui/Utilities/Reports/ReportDialog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "60240"
},
{
"name": "Batchfile",
"bytes": "1616"
},
{
"name": "C",
"bytes": "11379"
},
{
"name": "C++",
"bytes": "217553"
},
{
"name": "CSS",
"bytes": "340"
},
{
"name": "Gherkin",
"bytes": "565"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Inno Setup",
"bytes": "6892"
},
{
"name": "JavaScript",
"bytes": "27700"
},
{
"name": "Jupyter Notebook",
"bytes": "28926"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "2959880"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
from cinder.api.openstack import wsgi
from cinder.api.v3.views import clusters as clusters_view
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
CLUSTER_MICRO_VERSION = '3.7'
REPLICATION_DATA_MICRO_VERSION = '3.26'
class ClusterController(wsgi.Controller):
    allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts',
                         'num_down_hosts', 'replication_status',
                         'frozen', 'active_backend_id'}
replication_fields = {'replication_status', 'frozen', 'active_backend_id'}
policy_checker = wsgi.Controller.get_policy_checker('clusters')
@wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
def show(self, req, id, binary='cinder-volume'):
"""Return data for a given cluster name with optional binary."""
# Let the wsgi middleware convert NotAuthorized exceptions
context = self.policy_checker(req, 'get')
# Let the wsgi middleware convert NotFound exceptions
cluster = objects.Cluster.get_by_id(context, None, binary=binary,
name=id, services_summary=True)
replication_data = req.api_version_request.matches(
REPLICATION_DATA_MICRO_VERSION)
return clusters_view.ViewBuilder.detail(cluster, replication_data)
@wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
def index(self, req):
"""Return a non detailed list of all existing clusters.
Filter by is_up, disabled, num_hosts, and num_down_hosts.
"""
return self._get_clusters(req, detail=False)
@wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
def detail(self, req):
"""Return a detailed list of all existing clusters.
Filter by is_up, disabled, num_hosts, and num_down_hosts.
"""
return self._get_clusters(req, detail=True)
def _get_clusters(self, req, detail):
# Let the wsgi middleware convert NotAuthorized exceptions
context = self.policy_checker(req, 'get_all')
replication_data = req.api_version_request.matches(
REPLICATION_DATA_MICRO_VERSION)
filters = dict(req.GET)
allowed = self.allowed_list_keys
if not replication_data:
allowed = allowed.difference(self.replication_fields)
# Check filters are valid
if not allowed.issuperset(filters):
invalid_keys = set(filters).difference(allowed)
msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
raise exception.InvalidInput(reason=msg)
# Check boolean values
for bool_key in ('disabled', 'is_up'):
if bool_key in filters:
filters[bool_key] = utils.get_bool_param(bool_key, req.GET)
# For detailed view we need the services summary information
filters['services_summary'] = detail
clusters = objects.ClusterList.get_all(context, **filters)
return clusters_view.ViewBuilder.list(clusters, detail,
replication_data)
@wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
def update(self, req, id, body):
"""Enable/Disable scheduling for a cluster."""
# NOTE(geguileo): This method tries to be consistent with services
# update endpoint API.
# Let the wsgi middleware convert NotAuthorized exceptions
context = self.policy_checker(req, 'update')
if id not in ('enable', 'disable'):
raise exception.NotFound(message=_("Unknown action"))
disabled = id != 'enable'
disabled_reason = self._get_disabled_reason(body) if disabled else None
if not disabled and disabled_reason:
msg = _("Unexpected 'disabled_reason' found on enable request.")
raise exception.InvalidInput(reason=msg)
name = body.get('name')
if not name:
raise exception.MissingRequired(element='name')
binary = body.get('binary', 'cinder-volume')
# Let wsgi handle NotFound exception
cluster = objects.Cluster.get_by_id(context, None, binary=binary,
name=name)
cluster.disabled = disabled
cluster.disabled_reason = disabled_reason
cluster.save()
# We return summary data plus the disabled reason
replication_data = req.api_version_request.matches(
REPLICATION_DATA_MICRO_VERSION)
ret_val = clusters_view.ViewBuilder.summary(cluster, replication_data)
ret_val['cluster']['disabled_reason'] = disabled_reason
return ret_val
def _get_disabled_reason(self, body):
reason = body.get('disabled_reason')
if reason:
# Let wsgi handle InvalidInput exception
reason = reason.strip()
utils.check_string_length(reason, 'Disabled reason', min_length=1,
max_length=255)
return reason
def create_resource():
return wsgi.Resource(ClusterController())
| {
"content_hash": "1c114c7609f65cc38e9dd73b3dc87027",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 39.9140625,
"alnum_prop": 0.630456057936974,
"repo_name": "ge0rgi/cinder",
"id": "120f1e6a106f20eefee083c207fb0b99d1a07da1",
"size": "5741",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/api/v3/clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
from typing import List # noqa: F401
from libqtile import bar, layout, widget
from libqtile.config import Click, Drag, Group, Key, Screen
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
mod = "mod4"
terminal = guess_terminal()
keys = [
# Switch between windows in current stack pane
Key([mod], "k", lazy.layout.down(),
desc="Move focus down in stack pane"),
Key([mod], "j", lazy.layout.up(),
desc="Move focus up in stack pane"),
# Move windows up or down in current stack
Key([mod, "control"], "k", lazy.layout.shuffle_down(),
desc="Move window down in current stack "),
Key([mod, "control"], "j", lazy.layout.shuffle_up(),
desc="Move window up in current stack "),
# Switch window focus to other pane(s) of stack
Key([mod], "space", lazy.layout.next(),
desc="Switch window focus to other pane(s) of stack"),
# Swap panes of split stack
Key([mod, "shift"], "space", lazy.layout.rotate(),
desc="Swap panes of split stack"),
# Toggle between split and unsplit sides of stack.
# Split = all windows displayed
# Unsplit = 1 window displayed, like Max layout, but still with
# multiple stack panes
Key([mod, "shift"], "Return", lazy.layout.toggle_split(),
desc="Toggle between split and unsplit sides of stack"),
Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
# Toggle between different layouts as defined below
Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
Key([mod], "w", lazy.window.kill(), desc="Kill focused window"),
Key([mod, "control"], "r", lazy.restart(), desc="Restart qtile"),
Key([mod, "control"], "q", lazy.shutdown(), desc="Shutdown qtile"),
Key([mod], "r", lazy.spawncmd(),
desc="Spawn a command using a prompt widget"),
]
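# A sketch of how extra bindings could be appended (hypothetical command):
# keys.append(Key([mod], "b", lazy.spawn("firefox"), desc="Launch browser"))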
groups = [Group(i) for i in "asdfuiop"]
for i in groups:
keys.extend([
# mod1 + letter of group = switch to group
Key([mod], i.name, lazy.group[i.name].toscreen(),
desc="Switch to group {}".format(i.name)),
# mod1 + shift + letter of group = switch to & move focused window to group
Key([mod, "shift"], i.name, lazy.window.togroup(i.name, switch_group=True),
desc="Switch to & move focused window to group {}".format(i.name)),
# Or, use below if you prefer not to switch to that group.
# # mod1 + shift + letter of group = move focused window to group
# Key([mod, "shift"], i.name, lazy.window.togroup(i.name),
# desc="move focused window to group {}".format(i.name)),
])
layouts = [
layout.Max(),
layout.Stack(num_stacks=2),
# Try more layouts by unleashing below layouts.
# layout.Bsp(),
# layout.Columns(),
# layout.Matrix(),
# layout.MonadTall(),
# layout.MonadWide(),
# layout.RatioTile(),
# layout.Tile(),
# layout.TreeTab(),
# layout.VerticalTile(),
# layout.Zoomy(),
]
widget_defaults = dict(
font='sans',
fontsize=12,
padding=3,
)
extension_defaults = widget_defaults.copy()
screens = [
Screen(
bottom=bar.Bar(
[
widget.CurrentLayout(),
widget.GroupBox(),
widget.Prompt(),
widget.WindowName(),
widget.TextBox("default config", name="default"),
widget.TextBox("Press <M-r> to spawn", foreground="#d75f5f"),
widget.Systray(),
widget.Clock(format='%Y-%m-%d %a %I:%M %p'),
widget.QuickExit(),
],
24,
),
),
]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
{'wmclass': 'confirm'},
{'wmclass': 'dialog'},
{'wmclass': 'download'},
{'wmclass': 'error'},
{'wmclass': 'file_progress'},
{'wmclass': 'notification'},
{'wmclass': 'splash'},
{'wmclass': 'toolbar'},
{'wmclass': 'confirmreset'}, # gitk
{'wmclass': 'makebranch'}, # gitk
{'wmclass': 'maketag'}, # gitk
{'wname': 'branchdialog'}, # gitk
{'wname': 'pinentry'}, # GPG key password entry
{'wmclass': 'ssh-askpass'}, # ssh-askpass
])
auto_fullscreen = True
focus_on_window_activation = "smart"
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
| {
"content_hash": "77f2bae051b214f384bf290217f56f7a",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 83,
"avg_line_length": 34.796052631578945,
"alnum_prop": 0.6141047456986197,
"repo_name": "tych0/qtile",
"id": "106dfd79e9fb4463ba61e8bd829384e38de6d599",
"size": "6583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libqtile/resources/default_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "1299146"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "8166"
}
],
"symlink_target": ""
} |
"""CacheControl import Interface.
Make it easy to import from cachecontrol without long namespaces.
"""
__author__ = 'Eric Larson'
__email__ = '[email protected]'
__version__ = '0.12.0'
from .wrapper import CacheControl
from .adapter import CacheControlAdapter
from .controller import CacheController
| {
"content_hash": "3761291af0db3750d7e1be05b000e65b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 27.454545454545453,
"alnum_prop": 0.7549668874172185,
"repo_name": "sigmavirus24/pip",
"id": "a486fa25fb44eb6814fc55dd809fdd56b080c9ca",
"size": "302",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pip/_vendor/cachecontrol/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2342"
},
{
"name": "Python",
"bytes": "981296"
},
{
"name": "Shell",
"bytes": "1885"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name = 'c2c.recipe.cssmin',
version = '0.6',
license = 'MIT License',
author = 'Frederic Junod',
author_email = '[email protected]',
url = 'https://github.com/camptocamp/c2c.recipe.cssmin',
description = 'A buildout recipe to merge and compress css files',
long_description = open('README.rst').read(),
classifiers = [
'Framework :: Buildout',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License'
],
install_requires = ['cssmin'],
packages = find_packages(),
namespace_packages = ['c2c', 'c2c.recipe'],
entry_points = {'zc.buildout' : ['default = c2c.recipe.cssmin.buildout:CssMin']}
)
| {
"content_hash": "edbd6436d9779992aa24cea66281a37f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 32.81481481481482,
"alnum_prop": 0.6297968397291196,
"repo_name": "camptocamp/c2c.recipe.cssmin",
"id": "4c52e1fb9aaf08aceb03195e1685d63895362541",
"size": "933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4565"
}
],
"symlink_target": ""
} |
import numpy as np
from skimage.measure import regionprops
from copy import deepcopy
PROP_SAVE = ['area', 'cell_id', 'convex_area', 'corr_x', 'corr_y', 'cv_intensity',
'eccentricity', 'equivalent_diameter', 'euler_number', 'extent', 'filled_area',
'major_axis_length', 'max_intensity', 'mean_intensity',
'median_intensity', 'min_intensity', 'orientation',
'perimeter', 'solidity', 'std_intensity', 'total_intensity', 'x', 'y',
'coords']
class CellListMaker(object):
'''Make a list of Cell objects'''
def __init__(self, img, label, params, frame=0):
self.img = img
self.label = label
self.params = params
self.frame = frame
def make_list(self):
cell_prop = regionprops(self.label, self.img, cache=True)
celllist = [Cell(i, self.frame) for i in cell_prop]
return celllist
class CellListMakerScalar(CellListMaker):
'''Make a list of Cell objects but remove any regionprops features
which are tuple, list or array to reduce memory usage.
'''
def make_list(self):
if self.label.any():
cell_prop = regionprops(self.label, self.img, cache=True)
celllist = [Cell(i, self.frame) for i in cell_prop]
features = [i for i in dir(celllist[0].prop) if not i.startswith('_')]
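            # find regionprops-derived attributes whose values are sequences
            # or arrays; deleting them keeps each Cell object lightweight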
fremoved = []
for i in features:
if type(getattr(celllist[0].prop, i)) in (tuple, list, np.ndarray):
fremoved.append(i)
for i in fremoved:
[j.prop.__delattr__(i) for j in celllist]
return celllist
else:
return []
class Prop(object):
def __init__(self, prop):
        for ki in prop.__class__.__dict__:
if '__' not in ki:
setattr(self, ki, prop.__getitem__(ki))
self.label_id = prop.label
pix = prop['intensity_image']
pix = pix[pix != 0]
# CAUTION
        # This will not be reflected in the object labels (segmentation)
# if len(pix) > 2:
# pix = pix[(pix > np.nanpercentile(pix, 10)) * (pix<np.nanpercentile(pix, 90))]
self.mean_intensity = np.mean(pix)
self.median_intensity = np.median(pix)
self.total_intensity = prop['area'] * np.mean(pix)
self.std_intensity = np.std(pix)
self.cv_intensity = np.std(pix)/np.mean(pix)
self.x = self.centroid[1]
self.corr_x = self.centroid[1] # will updated when jitter corrected
self.y = self.centroid[0]
self.corr_y = self.centroid[0] # will updated when jitter corrected
self.parent_id = 0
self.frame = np.nan
self.abs_id = 0
self.cell_id = 0
class PropLight(object):
def __init__(self, prop):
        for ki in prop.__class__.__dict__:
if ki in PROP_SAVE:
setattr(self, ki, prop.__getitem__(ki))
self.label_id = prop.label
pix = prop['intensity_image']
pix = pix[pix != 0]
# CAUTION
        # This will not be reflected in the object labels (segmentation)
# if len(pix) > 2:
# pix = pix[(pix > np.nanpercentile(pix, 10)) * (pix<np.nanpercentile(pix, 90))]
self.mean_intensity = np.mean(pix, dtype=np.float32)
self.median_intensity = np.median(pix)
self.total_intensity = prop['area'] * np.mean(pix, dtype=np.float32)
self.std_intensity = np.std(pix, dtype=np.float32)
self.cv_intensity = np.std(pix, dtype=np.float32)/np.mean(pix, dtype=np.float32)
self.x = prop['centroid'][1]
self.corr_x = prop['centroid'][1] # will updated when jitter corrected
self.y = prop['centroid'][0]
self.corr_y = prop['centroid'][0] # will updated when jitter corrected
self.parent_id = 0
self.frame = np.nan
self.abs_id = 0
self.cell_id = 0
class Cell(object):
'''Cell object which holds Prop.
self.next and self.previous will return an associated cell in the next
frame or previous frame if available.
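    Example (a sketch; c0 and c1 would be Cells from consecutive frames):
        c0.next = c1   # the setter also wires c1.previous = c0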
'''
def __init__(self, prop, frame):
self.frame = frame
self.prop = PropLight(prop)
self.cell_id = None
self.parent = None
self._next = None
self.previous = None
@property
def next(self):
return self._next
@next.setter
def next(self, partner):
self._next = partner
partner.previous = self
| {
"content_hash": "df110d64486b0e09f553a6378ddc0076",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 92,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.5749445676274945,
"repo_name": "braysia/covertrack",
"id": "ac013ca4f9dce6d87c9c17ece985b2f080d1fe07",
"size": "4510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covertrack/cell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "682275"
},
{
"name": "Python",
"bytes": "250180"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
import re
from conary.dbstore import idtable
_cacheRe = {}
def checkTrove(pattern, trove):
global _cacheRe
if pattern == 'ALL' or trove is None:
return True
regExp = _cacheRe.get(pattern, None)
if regExp is None:
regExp = _cacheRe[pattern] = re.compile(pattern + '$')
if regExp.match(trove):
return True
return False
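# e.g. checkTrove('group-.*', 'group-dist') -> True; the pattern is anchored
# with a trailing '$' before matching, so a bare 'group-' would not match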
class Items(idtable.IdTable):
def __init__(self, db):
idtable.IdTable.__init__(self, db, 'Items', 'itemId', 'item')
def setTroveFlag(self, itemId, val):
cu = self.db.cursor()
if val: val = 1
else: val = 0
# we attempt to avoid doing busywork here in order to reduce
# lock contention on the items table during multiple commits
cu.execute("UPDATE Items SET hasTrove = ? "
"WHERE itemId = ? AND hasTrove != ?",
(val, itemId, val))
def iterkeys(self):
cu = self.db.cursor()
cu.execute("SELECT item FROM Items ORDER BY item")
for row in cu:
yield row[0]
def removeUnused(self):
cu = self.db.cursor()
cu.execute("""
DELETE FROM Items WHERE Items.itemId IN
(SELECT items.itemId FROM items
LEFT OUTER JOIN instances ON items.itemId = instances.itemId
WHERE instances.itemId is NULL)
""")
def updateCheckTrove(self, itemId, item):
cu = self.db.cursor()
        # having a CheckTroveCache entry for (item, ALL) is a marker that
        # we've already processed this item
cu.execute("select 1 from CheckTroveCache "
"where itemId = ? and patternId = 0", itemId)
if len(cu.fetchall()) > 0:
return
# need to process a new itemId
cu.execute("""
select distinct i.item, i.itemId from Permissions as p
join Items as i on p.itemId = i.itemId
where not exists (
select 1 from CheckTroveCache as ctc
where i.itemId = ctc.patternId and ctc.itemId = ? ) """, itemId)
pattSet = set([(x[0],x[1]) for x in cu.fetchall()])
# add the marker - this should not exist since we checked it earlier
pattSet.add(("ALL", 0))
for (pattern, patternId) in pattSet:
if checkTrove(pattern, item):
cu.execute("""
insert into CheckTroveCache(itemId, patternId)
values (?,?) """, (itemId, patternId))
def delId(self, theId):
cu = self.db.cursor()
cu.execute("delete from CheckTroveCache where itemId = ?", theId)
return idtable.IdTable.delId(self, theId)
# XXX: __setitem__ and __delitem__ aren't currently used, but if
# we do, they'll have to handle the CheckTrovesCache as well
def addPattern(self, pattern):
cu = self.db.cursor()
itemId = self.get(pattern, None)
if itemId is None:
itemId = idtable.IdTable.addId(self, pattern)
else:
# check if we're already tracking the permissions for this pattern
cu.execute("select count(*) from CheckTroveCache where patternId = ?",
itemId)
pCount = cu.fetchall()[0][0]
if pCount > 0:
return itemId
# need to update CheckTroveCache for this pattern
cu.execute("select Troves.itemId, Troves.item from Items as Troves")
for (tid, t) in cu.fetchall():
if checkTrove(pattern, t):
cu.execute("insert into CheckTroveCache(itemId, patternId) "
"values (?,?)", (tid, itemId))
return itemId
| {
"content_hash": "df02aadf8a4e92e03ed42665e36038f0",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 82,
"avg_line_length": 38.51578947368421,
"alnum_prop": 0.5714676141022137,
"repo_name": "fedora-conary/conary",
"id": "f80ecf003bb7d6c659bbf891f8141019deeb683c",
"size": "4246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conary/repository/netrepos/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
} |
import base64
from typing import List, Tuple
from google.cloud import aiplatform
from google.cloud.aiplatform.gapic.schema import predict
import requests
from train_model import with_retries
def run(
project: str, region: str, model_endpoint_id: str, image_file: str
) -> List[Tuple[str, float]]:
"""Sends an image from the LILA WCS database for prediction.
Args:
project: Google Cloud Project ID.
region: Location for AI Platform resources.
model_endpoint_id: Deployed model endpoint ID.
image_file: The image file path from LILA.
Returns:
The predictions as a list of (category, confidence) tuples, sorted by confidence.
"""
client = aiplatform.gapic.PredictionServiceClient(
client_options={
"api_endpoint": "us-central1-prediction-aiplatform.googleapis.com"
}
)
base_url = "https://lilablobssc.blob.core.windows.net/wcs-unzipped"
image_bytes = with_retries(lambda: requests.get(f"{base_url}/{image_file}").content)
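    # Vertex AI image classification expects the image bytes base64-encoded
    # as UTF-8 text inside each prediction instance, hence the encoding below.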
response = client.predict(
endpoint=client.endpoint_path(
project=project, location=region, endpoint=model_endpoint_id
),
instances=[
predict.instance.ImageClassificationPredictionInstance(
content=base64.b64encode(image_bytes).decode("utf-8"),
).to_value()
],
parameters=predict.params.ImageClassificationPredictionParams(
confidence_threshold=0.1,
max_predictions=5,
).to_value(),
)
prediction = [dict(pred) for pred in response.predictions][0]
return sorted(
[
(category, confidence)
for category, confidence in zip(
prediction["displayNames"], prediction["confidences"]
)
],
reverse=True,
key=lambda x: x[1],
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--project",
required=True,
help="Google Cloud Project Id",
)
parser.add_argument(
"--region",
required=True,
help="Location for AI Platform resources",
)
parser.add_argument(
"--model-endpoint-id",
required=True,
help="Deployed model endpoint ID",
)
parser.add_argument(
"--image-file",
required=True,
help="The image file path from LILA",
)
args = parser.parse_args()
predictions = run(
args.project, args.region, args.model_endpoint_id, args.image_file
)
for category, confidence in predictions:
print(f"{category}: {confidence * 100.0 : .2f}% confidence")
| {
"content_hash": "2a9c2adc2ada680deadaa623628aece8",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 89,
"avg_line_length": 29.41304347826087,
"alnum_prop": 0.6178861788617886,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "9b50d05940977c36ce2ee741ff74bb3ef7ffd628",
"size": "3305",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "people-and-planet-ai/image-classification/predict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), iterator=True) as reader:
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), chunksize=1) as reader:
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for iteration"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
pass
def test_iteration_open_handle(all_parsers):
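    # iterate part-way through an open handle, then hand the same handle to
    # read_csv: the parser should resume exactly where iteration stopped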
parser = all_parsers
kwargs = {"squeeze": True, "header": None}
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path) as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
| {
"content_hash": "ca2c9e9a216d5f8316fff6eee1574fe3",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 88,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.5943325876211782,
"repo_name": "gfyoung/pandas",
"id": "5ae1d80589df9a77d2d2abf2414efebbb64601e2",
"size": "2682",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/io/parser/common/test_iterator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
"""Module for loading various file formats."""
import glob
import json
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, Iterator, List, Tuple, Union
from .exceptions import NoCompatibleLoaderFoundError
from .fragment import Fragment
try:
import tomli
except ImportError: # pragma: nocover
tomli = None # type: ignore
try:
import yaml
except ImportError: # pragma: nocover
yaml = None # type: ignore
def iter_load(path: Union[str, Path]) -> Iterator[Fragment]:
"""Read settings file from a filepath or from a string representing the file contents.
If ``path`` is a valid filename or glob expression, load the
file (or all matching files).
Note that json, yaml and toml files are read.
Args:
        path: Path to a settings file, or a glob pattern matching several files
Raises:
FileLoadError: when an error occurs during the loading of a file.
NoCompatibleLoaderFoundError: when no compatible loader was found for
this filepath or content type.
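    Example (a sketch; the glob pattern is hypothetical):
        for fragment in iter_load('~/settings*.yaml'):
            print(fragment.source, fragment.value)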
"""
if not path:
return
expanded_path: str = os.path.expanduser(os.path.expandvars(path))
if glob.has_magic(expanded_path):
filepaths: List[str] = sorted(glob.glob(expanded_path))
else:
filepaths = [expanded_path]
for filepath in filepaths:
yield Fragment(value=load_from_filepath(filepath), source=filepath)
def load_from_filepath(filepath: str) -> Dict[str, Any]:
"""Read settings file from a filepath or from a string representing the file contents.
Args:
filepath: Path to file or file contents
Raises:
FileLoadError: when an error occurs during the loading of a file.
ContentLoadError: when an error occurs during the loading of file contents.
NoCompatibleLoaderFoundError: when no compatible loader was found for
this filepath or content type.
"""
file_data: dict = {}
if not filepath:
return file_data
for loader in FileLoader.registered_loaders:
if loader.is_path(filepath):
file_data = loader.from_path(filepath)
break
else:
raise NoCompatibleLoaderFoundError(
"Failed to load settings from filepath. "
"No compatible loader for file: {}".format(filepath)
)
return file_data
class FileLoader(ABC):
"""Abstract base class for file/file content loading."""
format_name: str = ""
valid_file_extensions: Tuple[str, ...] = ()
registered_loaders: List["FileLoader"] = []
@classmethod
@abstractmethod
def from_path(cls, path: str) -> Any:
"""Load serialized data from file at path."""
@classmethod
@abstractmethod
def from_content(cls, content: str) -> Any:
"""Load serialized data from content."""
@classmethod
def is_path(cls, path_or_content: str):
"""Check if argument is a valid file path.
If `only_existing` is set to ``True``, paths to files that don't exist
will also return ``False``.
"""
return len(str(path_or_content).strip().splitlines()) == 1 and (
os.path.splitext(path_or_content)[1] in cls.valid_file_extensions
)
@classmethod
def register(cls, class_to_register):
"""Register class as a valid file loader."""
cls.registered_loaders.append(class_to_register)
return class_to_register
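# A minimal sketch of how a custom loader could join the registry
# (hypothetical ".xyz" format; mirrors the concrete loaders below):
#
#   @FileLoader.register
#   class XyzLoader(FileLoader):
#       format_name = "xyz"
#       valid_file_extensions = (".xyz",)
#       ...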
@FileLoader.register
class JsonLoader(FileLoader):
"""FileLoader for .json files."""
format_name = "json"
valid_file_extensions = (".json",)
@classmethod
def from_content(cls, content: str) -> Any:
"""Load json from string."""
return json.loads(content)
@classmethod
def from_path(cls, path: str):
"""Load json from file at path."""
with open(path) as f:
return json.load(f)
@classmethod
def to_content(cls, data) -> str:
"""Serialize mapping to string."""
return json.dumps(data, indent=4)
@FileLoader.register
class YamlLoader(FileLoader):
"""FileLoader for .yaml files."""
format_name = "yaml"
valid_file_extensions = (".yml", ".yaml")
@classmethod
def from_content(cls, content: str) -> Any:
"""Load data from yaml formatted string."""
cls._check_yaml()
return yaml.safe_load(content)
@classmethod
def from_path(cls, path: str) -> Any:
"""Load data from path containing a yaml file."""
cls._check_yaml()
with open(path) as f:
return yaml.safe_load(f)
@staticmethod
def _check_yaml():
if yaml is None:
raise ImportError(
'"pyyaml" package needs to be installed to parse yaml files.'
)
@FileLoader.register
class TomlLoader(FileLoader):
"""FileLoader for .toml files."""
format_name = "toml"
valid_file_extensions = (".toml", ".ini", ".config", ".conf", ".cfg")
@classmethod
def from_content(cls, content: str) -> Any:
"""Load toml from string."""
cls._check_toml()
return tomli.loads(content)
@classmethod
def from_path(cls, path: str):
"""Load toml from file at path."""
cls._check_toml()
with open(path, "rb") as f:
return tomli.load(f)
@staticmethod
def _check_toml():
if tomli is None:
raise ImportError(
'"toml" package needs to be installed to parse toml files.'
)
| {
"content_hash": "4adc97f1f0d933f0d6c212b3d356e848",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 90,
"avg_line_length": 28.82198952879581,
"alnum_prop": 0.6257947320617621,
"repo_name": "daviskirk/climatecontrol",
"id": "2d6797370b0fd45c79c740af194ee160b549f85d",
"size": "5505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "climatecontrol/file_loaders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106922"
}
],
"symlink_target": ""
} |
"""Use unique constraints instead of unique indices
Revision ID: 1350
Revises: 1340
Create Date: 2019-07-10 12:43:28.461996
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1350'
down_revision = '1340'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(op.f('uq_direct_award_projects_external_id'), 'direct_award_projects', ['external_id'])
op.drop_index('ix_direct_award_projects_external_id', table_name='direct_award_projects')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_direct_award_projects_external_id', 'direct_award_projects', ['external_id'], unique=True)
op.drop_constraint(op.f('uq_direct_award_projects_external_id'), 'direct_award_projects', type_='unique')
# ### end Alembic commands ###
| {
"content_hash": "4c32d0ff94ff5a5bff476d1d1d26defa",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 119,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.698936170212766,
"repo_name": "alphagov/digitalmarketplace-api",
"id": "ad5383ac1391f851a750ba8813922d4d0596273e",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/1350_use_unique_constraints_direct_award.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "4777"
},
{
"name": "Makefile",
"bytes": "2140"
},
{
"name": "Mako",
"bytes": "414"
},
{
"name": "Nix",
"bytes": "3614"
},
{
"name": "Python",
"bytes": "1536454"
},
{
"name": "Shell",
"bytes": "973"
}
],
"symlink_target": ""
} |
import os
import hashlib
from sys import stdout
from os.path import join, getsize
from collections import Counter, defaultdict
rootdir = os.getcwd()
print('[i] using ' + rootdir + ' as root directory')
stdout.flush()
files = []
# XXX in case you have circular symlinks, we'll loop forever
for dirpath, dirnames, filenames in os.walk(rootdir, followlinks=True):
for filename in filenames:
currentfilename = join(dirpath, filename)
try:
currentfilesize = getsize(currentfilename)
if currentfilesize != 0:
filewithsize = currentfilename, currentfilesize
files.append(filewithsize)
except FileNotFoundError:
print('[-] could not get ' + currentfilename + ' file size')
stdout.flush()
print('[i] sorting ' + str(len(files)) + ' files by size')
stdout.flush()
files.sort(key = lambda t: t[1], reverse=True)
print('[i] removing unique files by size')
stdout.flush()
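# count how many files share each size; a file whose size is unique cannot
# have a duplicate, so it is dropped before any hashing happens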
counter = Counter(t[1] for t in files)
files = [t for t in files if counter[t[1]] > 1]
print('[i] hashing remaining ' + str(len(files)) + ' files')
stdout.flush()
duplicates = defaultdict(list)
for f in files:
if f[1] > 102400000:
print('[i] hashing large file: ' + f[0])
stdout.flush()
hasher = hashlib.sha256()
try:
with open(f[0], 'rb') as file:
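            # hash in 4 KiB chunks so large files never need to fit in
            # memory; iter(callable, sentinel) stops once read() hits EOF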
for chunk in iter(lambda: file.read(4096), b""):
hasher.update(chunk)
duplicates[hasher.hexdigest()].append(f)
except PermissionError:
print('[-] could not hash ' + f[0] + ' due to permissions')
stdout.flush()
except OSError:
print('[-] could not hash ' + f[0] + ' due to OS error')
stdout.flush()
for candidate in duplicates.items():
if len(candidate[1]) > 1:
print('[+] duplicates with hash ' + candidate[0] + ':')
for duplicate in candidate[1]:
print(duplicate[0] + ' (' + str(duplicate[1]) + ' bytes)')
stdout.flush()
| {
"content_hash": "7c67a52e25266fa6cbeb7c704ecafffb",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 72,
"avg_line_length": 32.81967213114754,
"alnum_prop": 0.6118881118881119,
"repo_name": "bomsi/dotfiles",
"id": "ec83ad904370ff04bb886c0b39c9bffdee2e45a9",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".local/bin/duplicates.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3496"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "Vim script",
"bytes": "294"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.http import HttpResponse
from django.utils.timezone import is_aware
from django.utils.functional import Promise
from django.utils import six
import json
import datetime
import decimal
import uuid
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
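                # isoformat() yields e.g. 2023-01-02T03:04:05.123456+00:00;
                # [:23] keeps millisecond precision and [26:] re-attaches
                # any timezone suffix, per the ECMA-262 date-time format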
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, uuid.UUID):
return str(o)
elif isinstance(o, Promise):
return six.text_type(o)
else:
return super(DjangoJSONEncoder, self).default(o)
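# A sketch of typical usage (hypothetical view function):
#
#   def my_view(request):
#       return JsonResponse({'when': datetime.datetime.now()})
#
# Non-dict payloads must opt out of the safety check, e.g.
# JsonResponse([1, 2, 3], safe=False).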
class JsonResponse(HttpResponse):
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError(
'In order to allow non-dict objects to be '
'serialized set the safe parameter to False'
)
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder)
super(JsonResponse, self).__init__(content=data, **kwargs)
| {
"content_hash": "7194ce90beb17ace71b5b3234f10a4e7",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 33.527272727272724,
"alnum_prop": 0.5905639913232104,
"repo_name": "allisson/django-tiny-rest",
"id": "aa70f18f62065ba0d985b2913f4dac668529cff6",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiny_rest/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53979"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
import copy
import mock
import six
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.engine.cfn import functions as cfn_functions
from heat.engine import check_resource as cr
from heat.engine import environment
from heat.engine import function
from heat.engine.hot import functions as hot_functions
from heat.engine.hot import parameters as hot_param
from heat.engine.hot import template as hot_template
from heat.engine import parameters
from heat.engine import resource
from heat.engine import resources
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
hot_tpl_empty = template_format.parse('''
heat_template_version: 2013-05-23
''')
hot_juno_tpl_empty = template_format.parse('''
heat_template_version: 2014-10-16
''')
hot_kilo_tpl_empty = template_format.parse('''
heat_template_version: 2015-04-30
''')
hot_liberty_tpl_empty = template_format.parse('''
heat_template_version: 2015-10-15
''')
hot_mitaka_tpl_empty = template_format.parse('''
heat_template_version: 2016-04-08
''')
hot_newton_tpl_empty = template_format.parse('''
heat_template_version: 2016-10-14
''')
hot_tpl_empty_sections = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
resources:
outputs:
''')
hot_tpl_generic_resource = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: GenericResourceType
''')
hot_tpl_generic_resource_20141016 = template_format.parse('''
heat_template_version: 2014-10-16
resources:
resource1:
type: GenericResourceType
''')
hot_tpl_generic_resource_all_attrs = template_format.parse('''
heat_template_version: 2015-10-15
resources:
resource1:
type: GenericResourceType
''')
hot_tpl_complex_attrs_all_attrs = template_format.parse('''
heat_template_version: 2015-10-15
resources:
resource1:
type: ResourceWithComplexAttributesType
''')
hot_tpl_complex_attrs = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: ResourceWithComplexAttributesType
''')
hot_tpl_complex_attrs_20141016 = template_format.parse('''
heat_template_version: 2014-10-16
resources:
resource1:
type: ResourceWithComplexAttributesType
''')
hot_tpl_mapped_props = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: ResWithComplexPropsAndAttrs
resource2:
type: ResWithComplexPropsAndAttrs
properties:
a_list: { get_attr: [ resource1, list] }
a_string: { get_attr: [ resource1, string ] }
a_map: { get_attr: [ resource1, map] }
''')
hot_tpl_mapped_props_all_attrs = template_format.parse('''
heat_template_version: 2015-10-15
resources:
resource1:
type: ResWithComplexPropsAndAttrs
resource2:
type: ResWithComplexPropsAndAttrs
properties:
a_list: { get_attr: [ resource1, list] }
a_string: { get_attr: [ resource1, string ] }
a_map: { get_attr: [ resource1, map] }
''')
class DummyClass(object):
metadata = None
def metadata_get(self):
return self.metadata
def metadata_set(self, metadata):
self.metadata = metadata
class HOTemplateTest(common.HeatTestCase):
"""Test processing of HOT templates."""
@staticmethod
def resolve(snippet, template, stack=None):
return function.resolve(template.parse(stack, snippet))
@staticmethod
def resolve_condition(snippet, template, stack=None):
return function.resolve(template.parse_condition(stack, snippet))
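    # template.parse() builds a tree containing Function objects;
    # function.resolve() evaluates it down to plain Python values so the
    # tests below can compare results directly.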
def test_defaults(self):
"""Test default content behavior of HOT template."""
tmpl = template.Template(hot_tpl_empty)
# check if we get the right class
self.assertIsInstance(tmpl, hot_template.HOTemplate20130523)
# test getting an invalid section
self.assertNotIn('foobar', tmpl)
# test defaults for valid sections
self.assertEqual('No description', tmpl[tmpl.DESCRIPTION])
self.assertEqual({}, tmpl[tmpl.RESOURCES])
self.assertEqual({}, tmpl[tmpl.OUTPUTS])
def test_defaults_for_empty_sections(self):
"""Test default secntion's content behavior of HOT template."""
tmpl = template.Template(hot_tpl_empty_sections)
# check if we get the right class
self.assertIsInstance(tmpl, hot_template.HOTemplate20130523)
# test getting an invalid section
self.assertNotIn('foobar', tmpl)
# test defaults for valid sections
self.assertEqual('No description', tmpl[tmpl.DESCRIPTION])
self.assertEqual({}, tmpl[tmpl.RESOURCES])
self.assertEqual({}, tmpl[tmpl.OUTPUTS])
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
self.assertIsNone(stack.parameters._validate_user_parameters())
self.assertIsNone(stack.validate())
def test_translate_resources_good(self):
"""Test translation of resources into internal engine format."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
expected = {'resource1': {'Type': 'AWS::EC2::Instance',
'Properties': {'property1': 'value1'},
'Metadata': {'foo': 'bar'},
'DependsOn': 'dummy',
'DeletionPolicy': 'dummy',
'UpdatePolicy': {'foo': 'bar'}}}
tmpl = template.Template(hot_tpl)
self.assertEqual(expected, tmpl[tmpl.RESOURCES])
def test_translate_resources_bad_no_data(self):
"""Test translation of resources without any mapping."""
hot_tpl = template_format.parse("""
heat_template_version: 2013-05-23
resources:
resource1:
""")
tmpl = template.Template(hot_tpl)
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('Each resource must contain a type key.',
six.text_type(error))
def test_translate_resources_bad_type(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
Type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"Type" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_translate_resources_bad_properties(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
Properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"Properties" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_translate_resources_resources_without_name(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
''')
tmpl = template.Template(hot_tpl)
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"resources" must contain a map of resource maps. '
'Found a [%s] instead' % six.text_type,
six.text_type(error))
def test_translate_resources_bad_metadata(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
Metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"Metadata" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_translate_resources_bad_depends_on(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
DependsOn: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"DependsOn" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_translate_resources_bad_deletion_policy(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
DeletionPolicy: dummy
update_policy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"DeletionPolicy" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_translate_resources_bad_update_policy(self):
"""Test translation of resources including invalid keyword."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
UpdatePolicy:
foo: bar
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"UpdatePolicy" is not a valid keyword '
'inside a resource definition',
six.text_type(err))
def test_get_outputs_good(self):
"""Test get outputs."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
outputs:
output1:
description: output1
value: value1
''')
expected = {'output1': {'description': 'output1', 'value': 'value1'}}
tmpl = template.Template(hot_tpl)
self.assertEqual(expected, tmpl[tmpl.OUTPUTS])
def test_get_outputs_bad_no_data(self):
"""Test get outputs without any mapping."""
hot_tpl = template_format.parse("""
heat_template_version: 2013-05-23
outputs:
output1:
""")
tmpl = template.Template(hot_tpl)
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertEqual('Each output must contain a value key.',
six.text_type(error))
def test_get_outputs_bad_without_name(self):
"""Test get outputs without name."""
hot_tpl = template_format.parse("""
heat_template_version: 2013-05-23
outputs:
description: wrong output
value: value1
""")
tmpl = template.Template(hot_tpl)
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertEqual('"outputs" must contain a map of output maps. '
'Found a [%s] instead' % six.text_type,
six.text_type(error))
def test_get_outputs_bad_description(self):
"""Test get outputs with bad description name."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
outputs:
output1:
Description: output1
value: value1
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertIn('Description', six.text_type(err))
def test_get_outputs_bad_value(self):
"""Test get outputs with bad value name."""
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
outputs:
output1:
description: output1
Value: value1
''')
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertIn('Value', six.text_type(err))
def test_resource_group_list_join(self):
"""Test list_join on a ResourceGroup's inner attributes
This should not fail during validation (i.e. before the ResourceGroup
can return the list of the runtime values.
"""
hot_tpl = template_format.parse('''
heat_template_version: 2014-10-16
resources:
rg:
type: OS::Heat::ResourceGroup
properties:
count: 3
resource_def:
type: OS::Nova::Server
''')
tmpl = template.Template(hot_tpl)
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
snippet = {'list_join': ["\n", {'get_attr': ['rg', 'name']}]}
self.assertEqual('', self.resolve(snippet, tmpl, stack))
# test list_join for liberty template
hot_tpl['heat_template_version'] = '2015-10-15'
tmpl = template.Template(hot_tpl)
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
snippet = {'list_join': ["\n", {'get_attr': ['rg', 'name']}]}
self.assertEqual('', self.resolve(snippet, tmpl, stack))
        # test list_join again, this time with multiple input lists
snippet = {'list_join': ["\n",
{'get_attr': ['rg', 'name']},
{'get_attr': ['rg', 'name']}]}
self.assertEqual('', self.resolve(snippet, tmpl, stack))
def test_deletion_policy_titlecase(self):
hot_tpl = template_format.parse('''
heat_template_version: 2016-10-14
resources:
del:
type: OS::Heat::None
deletion_policy: Delete
ret:
type: OS::Heat::None
deletion_policy: Retain
snap:
type: OS::Heat::None
deletion_policy: Snapshot
''')
rsrc_defns = template.Template(hot_tpl).resource_definitions(None)
self.assertEqual(rsrc_defn.ResourceDefinition.DELETE,
rsrc_defns['del'].deletion_policy())
self.assertEqual(rsrc_defn.ResourceDefinition.RETAIN,
rsrc_defns['ret'].deletion_policy())
self.assertEqual(rsrc_defn.ResourceDefinition.SNAPSHOT,
rsrc_defns['snap'].deletion_policy())
def test_deletion_policy(self):
hot_tpl = template_format.parse('''
heat_template_version: 2016-10-14
resources:
del:
type: OS::Heat::None
deletion_policy: delete
ret:
type: OS::Heat::None
deletion_policy: retain
snap:
type: OS::Heat::None
deletion_policy: snapshot
''')
rsrc_defns = template.Template(hot_tpl).resource_definitions(None)
self.assertEqual(rsrc_defn.ResourceDefinition.DELETE,
rsrc_defns['del'].deletion_policy())
self.assertEqual(rsrc_defn.ResourceDefinition.RETAIN,
rsrc_defns['ret'].deletion_policy())
self.assertEqual(rsrc_defn.ResourceDefinition.SNAPSHOT,
rsrc_defns['snap'].deletion_policy())
def test_str_replace(self):
"""Test str_replace function."""
snippet = {'str_replace': {'template': 'Template var1 string var2',
'params': {'var1': 'foo', 'var2': 'bar'}}}
snippet_resolved = 'Template foo string bar'
tmpl = template.Template(hot_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_replace_map_param(self):
"""Test str_replace function with non-string params."""
snippet = {'str_replace': {'template': 'jsonvar1',
'params': {'jsonvar1': {'foo': 123}}}}
snippet_resolved = '{"foo": 123}'
tmpl = template.Template(hot_liberty_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_replace_list_param(self):
"""Test str_replace function with non-string params."""
snippet = {'str_replace': {'template': 'listvar1',
'params': {'listvar1': ['foo', 123]}}}
snippet_resolved = '["foo", 123]'
tmpl = template.Template(hot_liberty_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_replace_number(self):
"""Test str_replace function with numbers."""
snippet = {'str_replace': {'template': 'Template number string bar',
'params': {'number': 1}}}
snippet_resolved = 'Template 1 string bar'
tmpl = template.Template(hot_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_fn_replace(self):
"""Test Fn:Replace function."""
snippet = {'Fn::Replace': [{'$var1': 'foo', '$var2': 'bar'},
'Template $var1 string $var2']}
snippet_resolved = 'Template foo string bar'
tmpl = template.Template(hot_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_replace_order(self, tpl=hot_tpl_empty):
"""Test str_replace function substitution order."""
snippet = {'str_replace': {'template': '1234567890',
'params': {'1': 'a',
'12': 'b',
'123': 'c',
'1234': 'd',
'12345': 'e',
'123456': 'f',
'1234567': 'g'}}}
tmpl = template.Template(tpl)
self.assertEqual('g890', self.resolve(snippet, tmpl))
def test_str_replace_liberty_order(self):
"""Test str_replace function substitution order."""
self.test_str_replace_order(hot_liberty_tpl_empty)
def test_str_replace_syntax(self):
"""Test str_replace function syntax.
Pass wrong syntax (array instead of dictionary) to function and
validate that we get a TypeError.
"""
snippet = {'str_replace': [{'template': 'Template var1 string var2'},
{'params': {'var1': 'foo', 'var2': 'bar'}}]}
tmpl = template.Template(hot_tpl_empty)
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
def test_str_replace_invalid_param_keys(self):
"""Test str_replace function parameter keys.
Pass wrong parameters to function and verify that we get
a KeyError.
"""
snippet = {'str_replace': {'tmpl': 'Template var1 string var2',
'params': {'var1': 'foo', 'var2': 'bar'}}}
tmpl = template.Template(hot_tpl_empty)
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
snippet = {'str_replace': {'tmpl': 'Template var1 string var2',
'parms': {'var1': 'foo', 'var2': 'bar'}}}
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
def test_str_replace_invalid_param_types(self):
"""Test str_replace function parameter values.
Pass parameter values of wrong type to function and verify that we get
a TypeError.
"""
snippet = {'str_replace': {'template': 12345,
'params': {'var1': 'foo', 'var2': 'bar'}}}
tmpl = template.Template(hot_tpl_empty)
self.assertRaises(TypeError, self.resolve, snippet, tmpl)
snippet = {'str_replace': {'template': 'Template var1 string var2',
'params': ['var1', 'foo', 'var2', 'bar']}}
ex = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('.str_replace: "str_replace" parameters must be a'
' mapping', six.text_type(ex))
def test_str_replace_invalid_param_type_init(self):
"""Test str_replace function parameter values.
Pass parameter values of wrong type to function and verify that we get
a TypeError in the constructor.
"""
args = [['var1', 'foo', 'var2', 'bar'],
'Template var1 string var2']
ex = self.assertRaises(
TypeError,
cfn_functions.Replace,
None, 'Fn::Replace', args)
self.assertIn('parameters must be a mapping', six.text_type(ex))
def test_str_replace_ref_get_param(self):
"""Test str_replace referencing parameters."""
hot_tpl = template_format.parse('''
heat_template_version: 2015-04-30
parameters:
p_template:
type: string
default: foo-replaceme
p_params:
type: json
default:
replaceme: success
resources:
rsrc:
type: ResWithStringPropAndAttr
properties:
a_string:
str_replace:
template: {get_param: p_template}
params: {get_param: p_params}
outputs:
replaced:
value: {get_attr: [rsrc, string]}
''')
tmpl = template.Template(hot_tpl)
self.stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertEqual('foo-success', self.stack.output('replaced'))
def test_get_file(self):
"""Test get_file function."""
snippet = {'get_file': 'file:///tmp/foo.yaml'}
snippet_resolved = 'foo contents'
tmpl = template.Template(hot_tpl_empty, files={
'file:///tmp/foo.yaml': 'foo contents'
})
stack = parser.Stack(utils.dummy_context(), 'param_id_test', tmpl)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl, stack))
def test_get_file_not_string(self):
"""Test get_file function with non-string argument."""
snippet = {'get_file': ['file:///tmp/foo.yaml']}
tmpl = template.Template(hot_tpl_empty)
stack = parser.Stack(utils.dummy_context(), 'param_id_test', tmpl)
notStrErr = self.assertRaises(TypeError, self.resolve,
snippet, tmpl, stack)
self.assertEqual(
'Argument to "get_file" must be a string',
six.text_type(notStrErr))
def test_get_file_missing_files(self):
"""Test get_file function with no matching key in files section."""
snippet = {'get_file': 'file:///tmp/foo.yaml'}
tmpl = template.Template(hot_tpl_empty, files={
'file:///tmp/bar.yaml': 'bar contents'
})
stack = parser.Stack(utils.dummy_context(), 'param_id_test', tmpl)
missingErr = self.assertRaises(ValueError, self.resolve,
snippet, tmpl, stack)
self.assertEqual(
('No content found in the "files" section for '
'get_file path: file:///tmp/foo.yaml'),
six.text_type(missingErr))
def test_get_file_nested_does_not_resolve(self):
"""Test get_file function does not resolve nested calls."""
snippet = {'get_file': 'file:///tmp/foo.yaml'}
snippet_resolved = '{get_file: file:///tmp/bar.yaml}'
tmpl = template.Template(hot_tpl_empty, files={
'file:///tmp/foo.yaml': snippet_resolved,
'file:///tmp/bar.yaml': 'bar content',
})
stack = parser.Stack(utils.dummy_context(), 'param_id_test', tmpl)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl, stack))
def test_list_join(self):
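        # list_join concatenates the items of a list with the given
        # delimiter: {list_join: [',', ['bar', 'baz']]} -> 'bar,baz'.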
snippet = {'list_join': [',', ['bar', 'baz']]}
snippet_resolved = 'bar,baz'
tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_join_multiple(self):
snippet = {'list_join': [',', ['bar', 'baz'], ['bar2', 'baz2']]}
snippet_resolved = 'bar,baz,bar2,baz2'
tmpl = template.Template(hot_liberty_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_list_join_empty_list(self):
snippet = {'list_join': [',', []]}
snippet_resolved = ''
k_tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, k_tmpl))
l_tmpl = template.Template(hot_liberty_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, l_tmpl))
def test_join_json(self):
snippet = {'list_join': [',', [{'foo': 'json'}, {'foo2': 'json2'}]]}
snippet_resolved = '{"foo": "json"},{"foo2": "json2"}'
l_tmpl = template.Template(hot_liberty_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, l_tmpl))
        # template versions before Liberty do not support joining JSON values
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, k_tmpl)
self.assertEqual("Items to join must be strings not {'foo': 'json'}",
six.text_type(exc))
def test_join_object_type_fail(self):
not_serializable = object
snippet = {'list_join': [',', [not_serializable]]}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, l_tmpl)
self.assertIn('Items to join must be string, map or list not',
six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, k_tmpl)
self.assertIn("Items to join must be strings", six.text_type(exc))
def test_join_json_fail(self):
not_serializable = object
snippet = {'list_join': [',', [{'foo': not_serializable}]]}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, l_tmpl)
self.assertIn('Items to join must be string, map or list',
six.text_type(exc))
self.assertIn("failed json serialization",
six.text_type(exc))
def test_join_invalid(self):
snippet = {'list_join': 'bad'}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
self.assertIn('.list_join: Incorrect arguments to "list_join"',
six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
self.assertIn('.list_join: Incorrect arguments to "list_join"',
six.text_type(exc1))
def test_join_int_invalid(self):
snippet = {'list_join': 5}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
self.assertIn('.list_join: Incorrect arguments', six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
self.assertIn('.list_join: Incorrect arguments', six.text_type(exc1))
def test_join_invalid_value(self):
snippet = {'list_join': [',']}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
self.assertIn('.list_join: Incorrect arguments to "list_join"',
six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
self.assertIn('.list_join: Incorrect arguments to "list_join"',
six.text_type(exc1))
def test_join_invalid_multiple(self):
snippet = {'list_join': [',', 'bad', ['foo']]}
tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
self.assertIn('must operate on a list', six.text_type(exc))
def test_merge(self):
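        # map_merge folds a list of maps left to right; keys in later maps
        # override earlier ones, so 'f1' resolves to 'b2' here.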
snippet = {'map_merge': [{'f1': 'b1', 'f2': 'b2'}, {'f1': 'b2'}]}
tmpl = template.Template(hot_mitaka_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual('b2', resolved['f1'])
self.assertEqual('b2', resolved['f2'])
def test_merge_none(self):
snippet = {'map_merge': [{'f1': 'b1', 'f2': 'b2'}, None]}
tmpl = template.Template(hot_mitaka_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual('b1', resolved['f1'])
self.assertEqual('b2', resolved['f2'])
def test_merge_invalid(self):
snippet = {'map_merge': [{'f1': 'b1', 'f2': 'b2'}, ['f1', 'b2']]}
tmpl = template.Template(hot_mitaka_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect arguments', six.text_type(exc))
def test_merge_containing_repeat(self):
snippet = {'map_merge': {'repeat': {'template': {'ROLE': 'ROLE'},
'for_each': {'ROLE': ['role1', 'role2']}}}}
tmpl = template.Template(hot_mitaka_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual('role1', resolved['role1'])
self.assertEqual('role2', resolved['role2'])
def test_map_replace(self):
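        # map_replace takes [input_map, {keys: ..., values: ...}] and
        # rewrites matching keys and values of the input map accordingly.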
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': {'f1': 'F1'},
'values': {'b2': 'B2'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'F1': 'b1', 'f2': 'B2'},
resolved)
def test_map_replace_nokeys(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'values': {'b2': 'B2'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'f1': 'b1', 'f2': 'B2'},
resolved)
def test_map_replace_novalues(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': {'f2': 'F2'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'f1': 'b1', 'F2': 'b2'},
resolved)
def test_map_replace_none_values(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'values': None}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'f1': 'b1', 'f2': 'b2'},
resolved)
def test_map_replace_none_keys(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': None}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'f1': 'b1', 'f2': 'b2'},
resolved)
def test_map_replace_unhashable_value(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': []},
{'values': {}}]}
tmpl = template.Template(hot_newton_tpl_empty)
resolved = self.resolve(snippet, tmpl)
self.assertEqual({'f1': 'b1', 'f2': []},
resolved)
def test_map_replace_keys_collide(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': {'f2': 'f1'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "key replacement f1 collides with a key in the input map"
self.assertRaisesRegexp(ValueError, msg, self.resolve, snippet, tmpl)
def test_map_replace_replaced_keys_collide(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': {'f1': 'f3', 'f2': 'f3'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "key replacement f3 collides with a key in the output map"
self.assertRaisesRegexp(ValueError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_str_arg1(self):
snippet = {'map_replace': 'ab'}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "Incorrect arguments to \"map_replace\" should be:"
self.assertRaisesRegexp(TypeError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_str_arg2(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'}, "ab"]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = ("Incorrect arguments: to \"map_replace\", "
"arguments must be a list of maps")
self.assertRaisesRegexp(TypeError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_empty(self):
snippet = {'map_replace': []}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "Incorrect arguments to \"map_replace\" should be:"
self.assertRaisesRegexp(TypeError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_missing1(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'}]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "Incorrect arguments to \"map_replace\" should be:"
self.assertRaisesRegexp(TypeError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_missing2(self):
snippet = {'map_replace': [{'keys': {'f1': 'f3', 'f2': 'f3'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "Incorrect arguments to \"map_replace\" should be:"
self.assertRaisesRegexp(TypeError, msg, self.resolve, snippet, tmpl)
def test_map_replace_invalid_wrongkey(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'notkeys': {'f2': 'F2'}}]}
tmpl = template.Template(hot_newton_tpl_empty)
msg = "Incorrect arguments to \"map_replace\" should be:"
self.assertRaisesRegexp(ValueError, msg, self.resolve, snippet, tmpl)
def test_yaql(self):
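        # The yaql function evaluates 'expression' against the map supplied
        # in 'data'; here $.data.var1 is the list [1, 2, 3, 4].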
snippet = {'yaql': {'expression': '$.data.var1.sum()',
'data': {'var1': [1, 2, 3, 4]}}}
tmpl = template.Template(hot_newton_tpl_empty)
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
resolved = self.resolve(snippet, tmpl, stack=stack)
self.assertEqual(10, resolved)
def test_yaql_invalid_data(self):
snippet = {'yaql': {'expression': '$.data.var1.sum()',
'data': 'mustbeamap'}}
tmpl = template.Template(hot_newton_tpl_empty)
msg = '.yaql: The "data" argument to "yaql" must contain a map.'
self.assertRaisesRegexp(exception.StackValidationFailed,
msg, self.resolve, snippet, tmpl)
def test_yaql_bogus_keys(self):
snippet = {'yaql': {'expression': '1 + 3',
'data': 'mustbeamap',
'bogus': ""}}
tmpl = template.Template(hot_newton_tpl_empty)
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
def test_yaql_invalid_syntax(self):
snippet = {'yaql': {'wrong': 'wrong_expr',
'wrong_data': 'mustbeamap'}}
tmpl = template.Template(hot_newton_tpl_empty)
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
def test_yaql_non_map_args(self):
snippet = {'yaql': 'invalid'}
tmpl = template.Template(hot_newton_tpl_empty)
msg = '.yaql: Arguments to "yaql" must be a map.'
self.assertRaisesRegexp(exception.StackValidationFailed,
msg, self.resolve, snippet, tmpl)
def test_yaql_invalid_expression(self):
snippet = {'yaql': {'expression': 'invalid(',
'data': {'var1': [1, 2, 3, 4]}}}
tmpl = template.Template(hot_newton_tpl_empty)
self.assertRaises(exception.StackValidationFailed,
tmpl.parse, None, snippet)
def test_yaql_data_as_function(self):
snippet = {'yaql': {'expression': '$.data.var1.len()',
'data': {
'var1': {'list_join': ['', ['1', '2']]}
}
}}
tmpl = template.Template(hot_newton_tpl_empty)
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
resolved = self.resolve(snippet, tmpl, stack=stack)
self.assertEqual(2, resolved)
def test_equals(self):
hot_tpl = template_format.parse('''
heat_template_version: 2016-10-14
parameters:
env_type:
type: string
default: 'test'
''')
snippet = {'equals': [{'get_param': 'env_type'}, 'prod']}
        # when param 'env_type' is 'test', equals resolves to false
tmpl = template.Template(hot_tpl)
stack = parser.Stack(utils.dummy_context(),
'test_equals_false', tmpl)
resolved = self.resolve_condition(snippet, tmpl, stack)
self.assertFalse(resolved)
        # when param 'env_type' is 'prod', equals resolves to true
tmpl = template.Template(hot_tpl,
env=environment.Environment(
{'env_type': 'prod'}))
stack = parser.Stack(utils.dummy_context(),
'test_equals_true', tmpl)
resolved = self.resolve_condition(snippet, tmpl, stack)
self.assertTrue(resolved)
def test_equals_invalid_args(self):
tmpl = template.Template(hot_newton_tpl_empty)
snippet = {'equals': ['test', 'prod', 'invalid']}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
error_msg = ('.equals: Arguments to "equals" must be '
'of the form: [value_1, value_2]')
self.assertIn(error_msg, six.text_type(exc))
snippet = {'equals': "invalid condition"}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
self.assertIn(error_msg, six.text_type(exc))
def test_equals_with_non_supported_function(self):
tmpl = template.Template(hot_newton_tpl_empty)
snippet = {'equals': [{'get_attr': [None, 'att1']},
{'get_attr': [None, 'att2']}]}
exc = self.assertRaises(exception.InvalidConditionFunction,
self.resolve_condition, snippet, tmpl)
error_msg = 'The function is not supported in condition: get_attr'
self.assertIn(error_msg, six.text_type(exc))
def test_if(self):
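        # if takes [condition_name, value_if_true, value_if_false]; the
        # named condition's boolean value selects the branch.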
snippet = {'if': ['create_prod', 'value_if_true', 'value_if_false']}
        # when the condition evaluates to true, the if function
        # resolves to value_if_true
tmpl = template.Template(hot_newton_tpl_empty)
stack = parser.Stack(utils.dummy_context(),
'test_if_function', tmpl)
tmpl._conditions = {'create_prod': True}
resolved = self.resolve(snippet, tmpl, stack)
self.assertEqual('value_if_true', resolved)
        # when the condition evaluates to false, the if function
        # resolves to value_if_false
tmpl._conditions = {'create_prod': False}
resolved = self.resolve(snippet, tmpl, stack)
self.assertEqual('value_if_false', resolved)
def test_if_invalid_args(self):
snippet = {'if': ['create_prod', 'one_value']}
tmpl = template.Template(hot_newton_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('Arguments to "if" must be of the form: '
'[condition_name, value_if_true, value_if_false]',
six.text_type(exc))
def test_if_condition_name_non_existing(self):
snippet = {'if': ['cd_not_existing', 'value_true', 'value_false']}
tmpl = template.Template(hot_newton_tpl_empty)
stack = parser.Stack(utils.dummy_context(),
'test_if_function', tmpl)
tmpl._conditions = {'create_prod': True}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl, stack)
self.assertIn('Invalid condition name "cd_not_existing"',
six.text_type(exc))
def test_repeat(self):
"""Test repeat function."""
snippet = {'repeat': {'template': 'this is %var%',
'for_each': {'%var%': ['a', 'b', 'c']}}}
snippet_resolved = ['this is a', 'this is b', 'this is c']
tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_repeat_get_param(self):
"""Test repeat function with get_param function as an argument."""
hot_tpl = template_format.parse('''
heat_template_version: 2015-04-30
parameters:
param:
type: comma_delimited_list
default: 'a,b,c'
''')
snippet = {'repeat': {'template': 'this is var%',
'for_each': {'var%': {'get_param': 'param'}}}}
snippet_resolved = ['this is a', 'this is b', 'this is c']
tmpl = template.Template(hot_tpl)
stack = parser.Stack(utils.dummy_context(), 'test_stack', tmpl)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl, stack))
def test_repeat_dict_with_no_replacement(self):
snippet = {'repeat': {'template': {'SERVICE_enabled': True},
'for_each': {'SERVICE': ['x', 'y', 'z']}}}
snippet_resolved = [{'x_enabled': True},
{'y_enabled': True},
{'z_enabled': True}]
tmpl = template.Template(hot_newton_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_repeat_dict_template(self):
"""Test repeat function with a dictionary as a template."""
snippet = {'repeat': {'template': {'key-%var%': 'this is %var%'},
'for_each': {'%var%': ['a', 'b', 'c']}}}
snippet_resolved = [{'key-a': 'this is a'},
{'key-b': 'this is b'},
{'key-c': 'this is c'}]
tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_repeat_list_template(self):
"""Test repeat function with a list as a template."""
snippet = {'repeat': {'template': ['this is %var%', 'static'],
'for_each': {'%var%': ['a', 'b', 'c']}}}
snippet_resolved = [['this is a', 'static'],
['this is b', 'static'],
['this is c', 'static']]
tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_repeat_multi_list(self):
"""Test repeat function with multiple input lists."""
snippet = {'repeat': {'template': 'this is %var1%-%var2%',
'for_each': {'%var1%': ['a', 'b', 'c'],
'%var2%': ['1', '2']}}}
snippet_resolved = ['this is a-1', 'this is b-1', 'this is c-1',
'this is a-2', 'this is b-2', 'this is c-2']
tmpl = template.Template(hot_kilo_tpl_empty)
result = self.resolve(snippet, tmpl)
self.assertEqual(len(result), len(snippet_resolved))
for item in result:
self.assertIn(item, snippet_resolved)
def test_repeat_list_and_map(self):
"""Test repeat function with a list and a map."""
snippet = {'repeat': {'template': 'this is %var1%-%var2%',
'for_each': {'%var1%': ['a', 'b', 'c'],
'%var2%': {'x': 'v', 'y': 'v'}}}}
snippet_resolved = ['this is a-x', 'this is b-x', 'this is c-x',
'this is a-y', 'this is b-y', 'this is c-y']
tmpl = template.Template(hot_newton_tpl_empty)
result = self.resolve(snippet, tmpl)
self.assertEqual(len(result), len(snippet_resolved))
for item in result:
self.assertIn(item, snippet_resolved)
def test_repeat_bad_args(self):
"""Tests reporting error by repeat function.
Test that the repeat function reports a proper error when missing or
invalid arguments.
"""
tmpl = template.Template(hot_kilo_tpl_empty)
# missing for_each
snippet = {'repeat': {'template': 'this is %var%'}}
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
# misspelled for_each
snippet = {'repeat': {'template': 'this is %var%',
'foreach': {'%var%': ['a', 'b', 'c']}}}
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
# misspelled template
snippet = {'repeat': {'templte': 'this is %var%',
'for_each': {'%var%': ['a', 'b', 'c']}}}
self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
def test_repeat_bad_arg_type(self):
tmpl = template.Template(hot_kilo_tpl_empty)
# for_each is not a map
snippet = {'repeat': {'template': 'this is %var%',
'for_each': '%var%'}}
self.assertRaises(exception.StackValidationFailed,
tmpl.parse, None, snippet)
def test_digest(self):
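        # digest applies the named hash algorithm to the value; the
        # expected string is the md5 hex digest of 'foobar'.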
snippet = {'digest': ['md5', 'foobar']}
snippet_resolved = '3858f62230ac3c915f300c664312c63f'
tmpl = template.Template(hot_kilo_tpl_empty)
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_digest_invalid_types(self):
tmpl = template.Template(hot_kilo_tpl_empty)
invalid_snippets = [
{'digest': 'invalid'},
{'digest': {'foo': 'invalid'}},
{'digest': [123]},
]
for snippet in invalid_snippets:
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
self.assertIn('must be a list of strings', six.text_type(exc))
def test_digest_incorrect_number_arguments(self):
tmpl = template.Template(hot_kilo_tpl_empty)
invalid_snippets = [
{'digest': []},
{'digest': ['foo']},
{'digest': ['md5']},
{'digest': ['md5', 'foo', 'bar']},
]
for snippet in invalid_snippets:
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('usage: ["<algorithm>", "<value>"]',
six.text_type(exc))
def test_digest_invalid_algorithm(self):
tmpl = template.Template(hot_kilo_tpl_empty)
snippet = {'digest': ['invalid_algorithm', 'foobar']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Algorithm must be one of', six.text_type(exc))
def test_str_split(self):
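        # str_split splits the string on the delimiter; an optional third
        # argument selects a single element by index (see the tests below).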
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz']}
snippet_resolved = ['bar', 'baz']
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_split_index(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', 1]}
snippet_resolved = 'baz'
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_split_index_str(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', '1']}
snippet_resolved = 'baz'
self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
def test_str_split_index_bad(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', 'bad']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect index to \"str_split\"', six.text_type(exc))
def test_str_split_index_out_of_range(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', '2']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
expected = 'Incorrect index to \"str_split\" should be between 0 and 1'
self.assertEqual(expected, six.text_type(exc))
def test_str_split_bad_novalue(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect arguments to \"str_split\"',
six.text_type(exc))
def test_str_split_bad_empty(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': []}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect arguments to \"str_split\"',
six.text_type(exc))
def test_str_split_none_string_to_split(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': ['.', None]}
self.assertIsNone(self.resolve(snippet, tmpl))
def test_str_split_none_delim(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [None, 'check']}
self.assertEqual(['check'], self.resolve(snippet, tmpl))
def test_prevent_parameters_access(self):
"""Check parameters section inaccessible using the template as a dict.
Test that the parameters section can't be accessed using the template
as a dictionary.
"""
expected_description = "This can be accessed"
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
description: {0}
parameters:
foo:
type: string
'''.format(expected_description))
tmpl = template.Template(hot_tpl)
self.assertEqual(expected_description, tmpl['description'])
err_str = "can not be accessed directly"
# Hot template test
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'parameters')
self.assertIn(err_str, six.text_type(keyError))
# CFN template test
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'Parameters')
self.assertIn(err_str, six.text_type(keyError))
def test_parameters_section_not_iterable(self):
"""Check parameters section is not returned using the template as iter.
Test that the parameters section is not returned when the template is
used as an iterable.
"""
expected_description = "This can be accessed"
tmpl = template.Template({'heat_template_version': '2013-05-23',
'description': expected_description,
'parameters':
{'foo': {'Type': 'String',
'Required': True}}})
self.assertEqual(expected_description, tmpl['description'])
self.assertNotIn('parameters', tmpl.keys())
def test_invalid_hot_version(self):
"""Test HOT version check.
Pass an invalid HOT version to template.Template.__new__() and
validate that we get a ValueError.
"""
tmpl_str = "heat_template_version: this-ain't-valid"
hot_tmpl = template_format.parse(tmpl_str)
self.assertRaises(exception.InvalidTemplateVersion,
template.Template, hot_tmpl)
def test_valid_hot_version(self):
"""Test HOT version check.
Pass a valid HOT version to template.Template.__new__() and
validate that we get back a parsed template.
"""
tmpl_str = "heat_template_version: 2013-05-23"
hot_tmpl = template_format.parse(tmpl_str)
parsed_tmpl = template.Template(hot_tmpl)
expected = ('heat_template_version', '2013-05-23')
observed = parsed_tmpl.version
self.assertEqual(expected, observed)
def test_resource_facade(self):
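        # resource_facade looks up the parent resource's definition in the
        # enclosing stack and exposes its metadata, deletion_policy or
        # update_policy to the nested stack.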
metadata_snippet = {'resource_facade': 'metadata'}
deletion_policy_snippet = {'resource_facade': 'deletion_policy'}
update_policy_snippet = {'resource_facade': 'update_policy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
parent_resource.t = rsrc_defn.ResourceDefinition(
'parent', 'SomeType',
deletion_policy=rsrc_defn.ResourceDefinition.RETAIN,
update_policy={"blarg": "wibble"})
parent_resource.stack = parser.Stack(utils.dummy_context(),
'toplevel_stack',
template.Template(hot_tpl_empty))
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_tpl_empty),
parent_resource='parent')
stack._parent_stack = dict(parent=parent_resource)
self.assertEqual({"foo": "bar"},
self.resolve(metadata_snippet, stack.t, stack))
self.assertEqual('Retain',
self.resolve(deletion_policy_snippet, stack.t, stack))
self.assertEqual({"blarg": "wibble"},
self.resolve(update_policy_snippet, stack.t, stack))
def test_resource_facade_function(self):
deletion_policy_snippet = {'resource_facade': 'deletion_policy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
tmpl = template.Template(hot_juno_tpl_empty)
parent_resource.stack = parser.Stack(utils.dummy_context(),
'toplevel_stack',
tmpl)
del_policy = hot_functions.Join(parent_resource.stack,
'list_join', ['eta', ['R', 'in']])
parent_resource.t = rsrc_defn.ResourceDefinition(
'parent', 'SomeType',
deletion_policy=del_policy)
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_tpl_empty),
parent_resource='parent')
stack._parent_stack = dict(parent=parent_resource)
self.assertEqual('Retain',
self.resolve(deletion_policy_snippet, stack.t, stack))
def test_resource_facade_invalid_arg(self):
snippet = {'resource_facade': 'wibble'}
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_tpl_empty))
error = self.assertRaises(exception.StackValidationFailed,
self.resolve,
snippet,
stack.t, stack)
self.assertIn(next(iter(snippet)), six.text_type(error))
def test_resource_facade_missing_deletion_policy(self):
snippet = {'resource_facade': 'deletion_policy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
parent_resource.t = rsrc_defn.ResourceDefinition('parent', 'SomeType')
parent_stack = parser.Stack(utils.dummy_context(),
'toplevel_stack',
template.Template(hot_tpl_empty))
parent_stack._resources = {'parent': parent_resource}
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_tpl_empty),
parent_resource='parent')
stack._parent_stack = parent_stack
self.assertEqual('Delete', self.resolve(snippet, stack.t, stack))
def test_removed_function(self):
snippet = {'Fn::GetAZs': ''}
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_juno_tpl_empty))
error = self.assertRaises(exception.StackValidationFailed,
stack.t.parse, stack, snippet)
self.assertIn(next(iter(snippet)), six.text_type(error))
def test_add_resource(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on:
- dummy
deletion_policy: Retain
update_policy:
foo: bar
resource2:
type: AWS::EC2::Instance
''')
source = template.Template(hot_tpl)
empty = template.Template(copy.deepcopy(hot_tpl_empty))
stack = parser.Stack(utils.dummy_context(), 'test_stack', source)
for defn in six.itervalues(source.resource_definitions(stack)):
empty.add_resource(defn)
self.assertEqual(hot_tpl['resources'], empty.t['resources'])
class HotStackTest(common.HeatTestCase):
"""Test stack function when stack was created from HOT template."""
def setUp(self):
super(HotStackTest, self).setUp()
self.tmpl = template.Template(copy.deepcopy(empty_template))
self.ctx = utils.dummy_context()
def resolve(self, snippet):
return function.resolve(self.stack.t.parse(self.stack, snippet))
def test_repeat_get_attr(self):
"""Test repeat function with get_attr function as an argument."""
tmpl = template.Template(hot_tpl_complex_attrs_all_attrs)
self.stack = parser.Stack(self.ctx, 'test_repeat_get_attr', tmpl)
snippet = {'repeat': {'template': 'this is %var%',
'for_each': {'%var%': {'get_attr': ['resource1', 'list']}}}}
repeat = self.stack.t.parse(self.stack, snippet)
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertEqual(['this is foo', 'this is bar'],
function.resolve(repeat))
def test_get_attr_multiple_rsrc_status(self):
"""Test resolution of get_attr occurrences in HOT template."""
hot_tpl = hot_tpl_generic_resource
self.stack = parser.Stack(self.ctx, 'test_get_attr',
template.Template(hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
snippet = {'Value': {'get_attr': ['resource1', 'foo']}}
rsrc = self.stack['resource1']
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE)):
rsrc.state_set(action, status)
# GenericResourceType has an attribute 'foo' which yields the
# resource name.
self.assertEqual({'Value': 'resource1'}, self.resolve(snippet))
def test_get_attr_invalid(self):
"""Test resolution of get_attr occurrences in HOT template."""
hot_tpl = hot_tpl_generic_resource
self.stack = parser.Stack(self.ctx, 'test_get_attr',
template.Template(hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertRaises(exception.InvalidTemplateAttribute,
self.resolve,
{'Value': {'get_attr': ['resource1', 'NotThere']}})
def test_get_attr_invalid_resource(self):
"""Test resolution of get_attr occurrences in HOT template."""
hot_tpl = hot_tpl_complex_attrs
self.stack = parser.Stack(self.ctx,
'test_get_attr_invalid_none',
template.Template(hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
snippet = {'Value': {'get_attr': ['resource2', 'who_cares']}}
self.assertRaises(exception.InvalidTemplateReference,
self.resolve, snippet)
def test_get_resource(self):
"""Test resolution of get_resource occurrences in HOT template."""
hot_tpl = hot_tpl_generic_resource
self.stack = parser.Stack(self.ctx, 'test_get_resource',
template.Template(hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
snippet = {'value': {'get_resource': 'resource1'}}
self.assertEqual({'value': 'resource1'}, self.resolve(snippet))
def test_set_param_id(self):
tmpl = template.Template(hot_tpl_empty)
self.stack = parser.Stack(self.ctx, 'param_id_test', tmpl)
self.assertEqual('None', self.stack.parameters['OS::stack_id'])
self.stack.store()
stack_identifier = self.stack.identifier()
self.assertEqual(self.stack.id, self.stack.parameters['OS::stack_id'])
self.assertEqual(stack_identifier.stack_id,
self.stack.parameters['OS::stack_id'])
self.m.VerifyAll()
def test_set_wrong_param(self):
tmpl = template.Template(hot_tpl_empty)
stack_id = identifier.HeatIdentifier('', "stack_testit", None)
params = tmpl.parameters(None, {})
self.assertFalse(params.set_stack_id(None))
self.assertTrue(params.set_stack_id(stack_id))
def test_set_param_id_update(self):
tmpl = template.Template(
{'heat_template_version': '2013-05-23',
'resources': {'AResource': {'type': 'ResourceWithPropsType',
'metadata': {'Bar': {'get_param': 'OS::stack_id'}},
'properties': {'Foo': 'abc'}}}})
self.stack = parser.Stack(self.ctx, 'update_stack_id_test', tmpl)
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
stack_id = self.stack.parameters['OS::stack_id']
tmpl2 = template.Template(
{'heat_template_version': '2013-05-23',
'resources': {'AResource': {'type': 'ResourceWithPropsType',
'metadata': {'Bar': {'get_param': 'OS::stack_id'}},
'properties': {'Foo': 'xyz'}}}})
updated_stack = parser.Stack(self.ctx, 'updated_stack', tmpl2)
self.stack.update(updated_stack)
self.assertEqual((parser.Stack.UPDATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.assertEqual(stack_id,
self.stack['AResource'].metadata_get()['Bar'])
def test_load_param_id(self):
tmpl = template.Template(hot_tpl_empty)
self.stack = parser.Stack(self.ctx, 'param_load_id_test', tmpl)
self.stack.store()
stack_identifier = self.stack.identifier()
self.assertEqual(stack_identifier.stack_id,
self.stack.parameters['OS::stack_id'])
newstack = parser.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(stack_identifier.stack_id,
newstack.parameters['OS::stack_id'])
def test_update_modify_param_ok_replace(self):
tmpl = {
'heat_template_version': '2013-05-23',
'parameters': {
'foo': {'type': 'string'}
},
'resources': {
'AResource': {
'type': 'ResourceWithPropsType',
'properties': {'Foo': {'get_param': 'foo'}}
}
}
}
self.m.StubOutWithMock(generic_rsrc.ResourceWithProps,
'update_template_diff')
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(
tmpl, env=environment.Environment(
{'foo': 'abc'})))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(
tmpl, env=environment.Environment(
{'foo': 'xyz'})))
def check_props(*args):
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
generic_rsrc.ResourceWithProps.update_template_diff(
rsrc_defn.ResourceDefinition('AResource',
'ResourceWithPropsType',
properties={'Foo': 'xyz'}),
rsrc_defn.ResourceDefinition('AResource',
'ResourceWithPropsType',
properties={'Foo': 'abc'})
).WithSideEffects(check_props).AndRaise(resource.UpdateReplace)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual((parser.Stack.UPDATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.m.VerifyAll()
def test_update_modify_files_ok_replace(self):
tmpl = {
'heat_template_version': '2013-05-23',
'parameters': {},
'resources': {
'AResource': {
'type': 'ResourceWithPropsType',
'properties': {'Foo': {'get_file': 'foo'}}
}
}
}
self.m.StubOutWithMock(generic_rsrc.ResourceWithProps,
'update_template_diff')
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl,
files={'foo': 'abc'}))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl,
files={'foo': 'xyz'}))
def check_props(*args):
self.assertEqual('abc', self.stack['AResource'].properties['Foo'])
generic_rsrc.ResourceWithProps.update_template_diff(
rsrc_defn.ResourceDefinition('AResource',
'ResourceWithPropsType',
properties={'Foo': 'xyz'}),
rsrc_defn.ResourceDefinition('AResource',
'ResourceWithPropsType',
properties={'Foo': 'abc'})
).WithSideEffects(check_props).AndRaise(resource.UpdateReplace)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual((parser.Stack.UPDATE, parser.Stack.COMPLETE),
self.stack.state)
self.assertEqual('xyz', self.stack['AResource'].properties['Foo'])
self.m.VerifyAll()
class StackAttributesTest(common.HeatTestCase):
"""Test get_attr function when stack was created from HOT template."""
def setUp(self):
super(StackAttributesTest, self).setUp()
self.ctx = utils.dummy_context()
self.m.ReplayAll()
scenarios = [
# for hot template 2013-05-23, get_attr: hot_funcs.GetAttThenSelect
('get_flat_attr',
dict(hot_tpl=hot_tpl_generic_resource,
snippet={'Value': {'get_attr': ['resource1', 'foo']}},
resource_name='resource1',
expected={'Value': 'resource1'})),
('get_list_attr',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1', 'list', 0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.list[0]})),
('get_flat_dict_attr',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'flat_dict',
'key2']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
flat_dict['key2']})),
('get_nested_attr_list',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'list',
0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['list'][0]})),
('get_nested_attr_dict',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'dict',
'a']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['dict']['a']})),
('get_attr_none',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'none',
'who_cares']}},
resource_name='resource1',
expected={'Value': None})),
# for hot template version 2014-10-16 and 2015-04-30,
# get_attr: hot_funcs.GetAtt
('get_flat_attr',
dict(hot_tpl=hot_tpl_generic_resource_20141016,
snippet={'Value': {'get_attr': ['resource1', 'foo']}},
resource_name='resource1',
expected={'Value': 'resource1'})),
('get_list_attr',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1', 'list', 0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.list[0]})),
('get_flat_dict_attr',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'flat_dict',
'key2']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
flat_dict['key2']})),
('get_nested_attr_list',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'list',
0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['list'][0]})),
('get_nested_attr_dict',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'dict',
'a']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['dict']['a']})),
('get_attr_none',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'none',
'who_cares']}},
resource_name='resource1',
expected={'Value': None}))
]
def test_get_attr(self):
"""Test resolution of get_attr occurrences in HOT template."""
self.stack = parser.Stack(self.ctx, 'test_get_attr',
template.Template(self.hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
rsrc = self.stack[self.resource_name]
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.SNAPSHOT, rsrc.IN_PROGRESS),
(rsrc.SNAPSHOT, rsrc.COMPLETE),
(rsrc.CHECK, rsrc.IN_PROGRESS),
(rsrc.CHECK, rsrc.COMPLETE),
(rsrc.ADOPT, rsrc.IN_PROGRESS),
(rsrc.ADOPT, rsrc.COMPLETE)):
rsrc.state_set(action, status)
resolved = function.resolve(self.stack.t.parse(self.stack,
self.snippet))
self.assertEqual(self.expected, resolved)
class StackGetAttributesTestConvergence(common.HeatTestCase):
def setUp(self):
super(StackGetAttributesTestConvergence, self).setUp()
self.ctx = utils.dummy_context()
self.m.ReplayAll()
scenarios = [
# for hot template 2013-05-23, get_attr: hot_funcs.GetAttThenSelect
('get_flat_attr',
dict(hot_tpl=hot_tpl_generic_resource,
snippet={'Value': {'get_attr': ['resource1', 'foo']}},
resource_name='resource1',
expected={'Value': 'resource1'})),
('get_list_attr',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1', 'list', 0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.list[0]})),
('get_flat_dict_attr',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'flat_dict',
'key2']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
flat_dict['key2']})),
('get_nested_attr_list',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'list',
0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['list'][0]})),
('get_nested_attr_dict',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'dict',
'a']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['dict']['a']})),
('get_attr_none',
dict(hot_tpl=hot_tpl_complex_attrs,
snippet={'Value': {'get_attr': ['resource1',
'none',
'who_cares']}},
resource_name='resource1',
expected={'Value': None})),
# for hot template version 2014-10-16 and 2015-04-30,
# get_attr: hot_funcs.GetAtt
('get_flat_attr',
dict(hot_tpl=hot_tpl_generic_resource_20141016,
snippet={'Value': {'get_attr': ['resource1', 'foo']}},
resource_name='resource1',
expected={'Value': 'resource1'})),
('get_list_attr',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1', 'list', 0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.list[0]})),
('get_flat_dict_attr',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'flat_dict',
'key2']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
flat_dict['key2']})),
('get_nested_attr_list',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'list',
0]}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['list'][0]})),
('get_nested_attr_dict',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'nested_dict',
'dict',
'a']}},
resource_name='resource1',
expected={
'Value':
generic_rsrc.ResourceWithComplexAttributes.
nested_dict['dict']['a']})),
('get_attr_none',
dict(hot_tpl=hot_tpl_complex_attrs_20141016,
snippet={'Value': {'get_attr': ['resource1',
'none',
'who_cares']}},
resource_name='resource1',
expected={'Value': None}))
]
def _prepare_cache_data(self, rsrc):
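        # Resolve the attributes the snippet depends on up front and stash
        # them in stack.cache_data, mimicking what convergence persists.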
attributes = function.dep_attrs(
self.stack.t.parse(self.stack, self.snippet),
self.resource_name)
# store as cache data
self.stack.cache_data = {
rsrc.name: {
'attrs': cr._resolve_attributes(attributes, rsrc)
}
}
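# Shape of the cache prepared above (illustrative): {resource_name: {'attrs':
# {attribute: resolved_value, ...}}} -- the same structure convergence
# persists, so get_attr below can be answered without calling the resource
# plugin.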
def test_get_attr_convergence(self):
"""Test resolution of get_attr occurrences with convergence."""
self.stack = parser.Stack(self.ctx, 'test_get_attr',
template.Template(self.hot_tpl))
self.stack.store()
self.stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
self.stack.state)
rsrc = self.stack[self.resource_name]
self._prepare_cache_data(rsrc)
with mock.patch.object(resource.Resource, 'get_attribute') as mock_ga:
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.SNAPSHOT, rsrc.IN_PROGRESS),
(rsrc.SNAPSHOT, rsrc.COMPLETE),
(rsrc.CHECK, rsrc.IN_PROGRESS),
(rsrc.CHECK, rsrc.COMPLETE),
(rsrc.ADOPT, rsrc.IN_PROGRESS),
(rsrc.ADOPT, rsrc.COMPLETE)):
rsrc.state_set(action, status)
resolved = function.resolve(self.stack.t.parse(self.stack,
self.snippet))
self.assertEqual(self.expected, resolved)
# get_attribute should never be called, everything
# should be resolved from cache data
self.assertFalse(mock_ga.called)
class StackGetAttrValidationTest(common.HeatTestCase):
def setUp(self):
super(StackGetAttrValidationTest, self).setUp()
self.ctx = utils.dummy_context()
def test_validate_props_from_attrs(self):
stack = parser.Stack(self.ctx, 'test_props_from_attrs',
template.Template(hot_tpl_mapped_props))
stack.resources['resource1'].list = None
stack.resources['resource1'].map = None
stack.resources['resource1'].string = None
try:
stack.validate()
except exception.StackValidationFailed as exc:
self.fail("Validation should have passed: %s" % six.text_type(exc))
self.assertEqual([],
stack.resources['resource2'].properties['a_list'])
self.assertEqual({},
stack.resources['resource2'].properties['a_map'])
self.assertEqual('',
stack.resources['resource2'].properties['a_string'])
def test_validate_props_from_attrs_all_attrs(self):
stack = parser.Stack(self.ctx, 'test_props_from_attrs',
template.Template(hot_tpl_mapped_props_all_attrs))
stack.resources['resource1'].list = None
stack.resources['resource1'].map = None
stack.resources['resource1'].string = None
try:
stack.validate()
except exception.StackValidationFailed as exc:
self.fail("Validation should have passed: %s" % six.text_type(exc))
self.assertEqual([],
stack.resources['resource2'].properties['a_list'])
self.assertEqual({},
stack.resources['resource2'].properties['a_map'])
self.assertEqual('',
stack.resources['resource2'].properties['a_string'])
class StackParametersTest(common.HeatTestCase):
"""Test get_param function when stack was created from HOT template."""
scenarios = [
('Ref_string',
dict(params={'foo': 'bar', 'blarg': 'wibble'},
snippet={'properties': {'prop1': {'Ref': 'foo'},
'prop2': {'Ref': 'blarg'}}},
expected={'properties': {'prop1': 'bar',
'prop2': 'wibble'}})),
('get_param_string',
dict(params={'foo': 'bar', 'blarg': 'wibble'},
snippet={'properties': {'prop1': {'get_param': 'foo'},
'prop2': {'get_param': 'blarg'}}},
expected={'properties': {'prop1': 'bar',
'prop2': 'wibble'}})),
('get_list_attr',
dict(params={'list': 'foo,bar'},
snippet={'properties': {'prop1': {'get_param': ['list', 1]}}},
expected={'properties': {'prop1': 'bar'}})),
('get_list_attr_string_index',
dict(params={'list': 'foo,bar'},
snippet={'properties': {'prop1': {'get_param': ['list', '1']}}},
expected={'properties': {'prop1': 'bar'}})),
('get_flat_dict_attr',
dict(params={'flat_dict':
{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}},
snippet={'properties': {'prop1': {'get_param':
['flat_dict', 'key2']}}},
expected={'properties': {'prop1': 'val2'}})),
('get_nested_attr_list',
dict(params={'nested_dict':
{'list': [1, 2, 3],
'string': 'abc',
'dict': {'a': 1, 'b': 2, 'c': 3}}},
snippet={'properties': {'prop1': {'get_param':
['nested_dict',
'list',
0]}}},
expected={'properties': {'prop1': 1}})),
('get_nested_attr_dict',
dict(params={'nested_dict':
{'list': [1, 2, 3],
'string': 'abc',
'dict': {'a': 1, 'b': 2, 'c': 3}}},
snippet={'properties': {'prop1': {'get_param':
['nested_dict',
'dict',
'a']}}},
expected={'properties': {'prop1': 1}})),
('get_attr_none',
dict(params={'none': None},
snippet={'properties': {'prop1': {'get_param':
['none',
'who_cares']}}},
expected={'properties': {'prop1': ''}})),
('pseudo_stack_id',
dict(params={},
snippet={'properties': {'prop1': {'get_param':
'OS::stack_id'}}},
expected={'properties':
{'prop1': '1ba8c334-2297-4312-8c7c-43763a988ced'}})),
('pseudo_stack_name',
dict(params={},
snippet={'properties': {'prop1': {'get_param':
'OS::stack_name'}}},
expected={'properties': {'prop1': 'test'}})),
('pseudo_project_id',
dict(params={},
snippet={'properties': {'prop1': {'get_param':
'OS::project_id'}}},
expected={'properties':
{'prop1': '9913ef0a-b8be-4b33-b574-9061441bd373'}})),
]
props_template = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
foo:
type: string
default: ''
blarg:
type: string
default: ''
list:
type: comma_delimited_list
default: ''
flat_dict:
type: json
default: {}
nested_dict:
type: json
default: {}
none:
type: string
default: 'default'
''')
def test_param_refs(self):
"""Test if parameter references work."""
env = environment.Environment(self.params)
tmpl = template.Template(self.props_template, env=env)
stack = parser.Stack(utils.dummy_context(), 'test', tmpl,
stack_id='1ba8c334-2297-4312-8c7c-43763a988ced',
tenant_id='9913ef0a-b8be-4b33-b574-9061441bd373')
self.assertEqual(self.expected,
function.resolve(tmpl.parse(stack, self.snippet)))
class HOTParamValidatorTest(common.HeatTestCase):
"""Test HOTParamValidator."""
def test_multiple_constraint_descriptions(self):
len_desc = 'string length should be between 8 and 16'
pattern_desc1 = 'Value must consist of characters only'
pattern_desc2 = 'Value must start with a lowercase character'
param = {
'db_name': {
'description': 'The WordPress database name',
'type': 'string',
'default': 'wordpress',
'constraints': [
{'length': {'min': 6, 'max': 16},
'description': len_desc},
{'allowed_pattern': '[a-zA-Z]+',
'description': pattern_desc1},
{'allowed_pattern': '[a-z]+[a-zA-Z]*',
'description': pattern_desc2}]}}
name = 'db_name'
schema = param['db_name']
def v(value):
param_schema = hot_param.HOTParamSchema.from_dict(name, schema)
param_schema.validate()
param_schema.validate_value(value)
return True
value = 'wp'
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(len_desc, six.text_type(err))
value = 'abcdefghijklmnopq'
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(len_desc, six.text_type(err))
value = 'abcdefgh1'
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(pattern_desc1, six.text_type(err))
value = 'Abcdefghi'
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(pattern_desc2, six.text_type(err))
value = 'abcdefghi'
self.assertTrue(v(value))
value = 'abcdefghI'
self.assertTrue(v(value))
def test_hot_template_validate_param(self):
len_desc = 'string length should be between 8 and 16'
pattern_desc1 = 'Value must consist of characters only'
pattern_desc2 = 'Value must start with a lowercase character'
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
db_name:
description: The WordPress database name
type: string
default: wordpress
constraints:
- length: { min: 8, max: 16 }
description: %s
- allowed_pattern: "[a-zA-Z]+"
description: %s
- allowed_pattern: "[a-z]+[a-zA-Z]*"
description: %s
''' % (len_desc, pattern_desc1, pattern_desc2))
tmpl = template.Template(hot_tpl)
def run_parameters(value):
tmpl.parameters(
identifier.HeatIdentifier('', "stack_testit", None),
{'db_name': value}).validate(validate_value=True)
return True
value = 'wp'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
self.assertIn(len_desc, six.text_type(err))
value = 'abcdefghijklmnopq'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
self.assertIn(len_desc, six.text_type(err))
value = 'abcdefgh1'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
self.assertIn(pattern_desc1, six.text_type(err))
value = 'Abcdefghi'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
self.assertIn(pattern_desc2, six.text_type(err))
value = 'abcdefghi'
self.assertTrue(run_parameters(value))
value = 'abcdefghI'
self.assertTrue(run_parameters(value))
def test_range_constraint(self):
range_desc = 'Value must be between 30000 and 50000'
param = {
'db_port': {
'description': 'The database port',
'type': 'number',
'default': 31000,
'constraints': [
{'range': {'min': 30000, 'max': 50000},
'description': range_desc}]}}
name = 'db_port'
schema = param['db_port']
def v(value):
param_schema = hot_param.HOTParamSchema.from_dict(name, schema)
param_schema.validate()
param_schema.validate_value(value)
return True
value = 29999
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(range_desc, six.text_type(err))
value = 50001
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertIn(range_desc, six.text_type(err))
value = 30000
self.assertTrue(v(value))
value = 40000
self.assertTrue(v(value))
value = 50000
self.assertTrue(v(value))
def test_custom_constraint(self):
class ZeroConstraint(object):
def validate(self, value, context):
return value == "0"
env = resources.global_env()
env.register_constraint("zero", ZeroConstraint)
self.addCleanup(env.constraints.pop, "zero")
desc = 'Value must be zero'
param = {
'param1': {
'type': 'string',
'constraints': [
{'custom_constraint': 'zero',
'description': desc}]}}
name = 'param1'
schema = param['param1']
def v(value):
param_schema = hot_param.HOTParamSchema.from_dict(name, schema)
param_schema.validate()
param_schema.validate_value(value)
return True
value = "1"
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertEqual(desc, six.text_type(err))
value = "2"
err = self.assertRaises(exception.StackValidationFailed, v, value)
self.assertEqual(desc, six.text_type(err))
value = "0"
self.assertTrue(v(value))
def test_custom_constraint_default_skip(self):
schema = {
'type': 'string',
'constraints': [{
'custom_constraint': 'skipping',
'description': 'Must be skipped on default value'
}],
'default': 'foo'
}
param_schema = hot_param.HOTParamSchema.from_dict('p', schema)
param_schema.validate()
def test_range_constraint_invalid_default(self):
range_desc = 'Value must be between 30000 and 50000'
param = {
'db_port': {
'description': 'The database port',
'type': 'number',
'default': 15,
'constraints': [
{'range': {'min': 30000, 'max': 50000},
'description': range_desc}]}}
schema = hot_param.HOTParamSchema.from_dict('db_port',
param['db_port'])
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
self.assertIn(range_desc, six.text_type(err))
def test_validate_schema_wrong_key(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
foo: bar
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual("Invalid key 'foo' for parameter (param1)",
six.text_type(error))
def test_validate_schema_no_type(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
description: Hi!
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual("Missing parameter type for parameter: param1",
six.text_type(error))
def test_validate_schema_unknown_type(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: Unicode
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid type (Unicode)", six.text_type(error))
def test_validate_schema_constraints(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints:
- allowed_valus: [foo, bar]
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid key 'allowed_valus' for parameter constraints",
six.text_type(error))
def test_validate_schema_constraints_not_list(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints: 1
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid parameter constraints for parameter param1, "
"expected a list", six.text_type(error))
def test_validate_schema_constraints_not_mapping(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints: [foo]
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid parameter constraints, expected a mapping",
six.text_type(error))
def test_validate_schema_empty_constraints(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints:
- description: a constraint
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual("No constraint expressed", six.text_type(error))
def test_validate_schema_constraints_range_wrong_format(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: number
constraints:
- range: foo
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid range constraint, expected a mapping",
six.text_type(error))
def test_validate_schema_constraints_range_invalid_key(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: number
constraints:
- range: {min: 1, foo: bar}
default: 1
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid key 'foo' for range constraint", six.text_type(error))
def test_validate_schema_constraints_length_wrong_format(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints:
- length: foo
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid length constraint, expected a mapping",
six.text_type(error))
def test_validate_schema_constraints_length_invalid_key(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints:
- length: {min: 1, foo: bar}
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid key 'foo' for length constraint", six.text_type(error))
def test_validate_schema_constraints_wrong_allowed_pattern(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
param1:
type: string
constraints:
- allowed_pattern: [foo, bar]
default: foo
''')
error = self.assertRaises(
exception.InvalidSchemaError, parameters.Parameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"AllowedPattern must be a string", six.text_type(error))
class TestGetAttAllAttributes(common.HeatTestCase):
scenarios = [
('test_get_attr_all_attributes', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': ['resource1']}},
expected={'Value': {'Foo': 'resource1', 'foo': 'resource1'}},
raises=None
)),
('test_get_attr_all_attributes_str', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': 'resource1'}},
expected='.Value.get_attr: Argument to "get_attr" must be a '
'list',
raises=exception.StackValidationFailed
)),
('test_get_attr_all_attributes_invalid_resource_list', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': ['resource2']}},
raises=exception.InvalidTemplateReference,
expected='The specified reference "resource2" '
'(in unknown) is incorrect.'
)),
('test_get_attr_all_attributes_invalid_type', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': {'resource1': 'attr1'}}},
raises=exception.StackValidationFailed,
expected='.Value.get_attr: Argument to "get_attr" must be a '
'list'
)),
('test_get_attr_all_attributes_invalid_arg_str', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': ''}},
raises=exception.StackValidationFailed,
expected='.Value.get_attr: Arguments to "get_attr" can be of '
'the next forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
)),
('test_get_attr_all_attributes_invalid_arg_list', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': []}},
raises=exception.StackValidationFailed,
expected='.Value.get_attr: Arguments to "get_attr" can be of '
'the next forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
)),
('test_get_attr_all_attributes_standard', dict(
hot_tpl=hot_tpl_generic_resource_all_attrs,
snippet={'Value': {'get_attr': ['resource1', 'foo']}},
expected={'Value': 'resource1'},
raises=None
)),
('test_get_attr_all_attrs_complex_attrs', dict(
hot_tpl=hot_tpl_complex_attrs_all_attrs,
snippet={'Value': {'get_attr': ['resource1']}},
expected={'Value': {'flat_dict': {'key1': 'val1',
'key2': 'val2',
'key3': 'val3'},
'list': ['foo', 'bar'],
'nested_dict': {'dict': {'a': 1,
'b': 2,
'c': 3},
'list': [1, 2, 3],
'string': 'abc'},
'none': None}},
raises=None
)),
('test_get_attr_all_attrs_complex_attrs_standard', dict(
hot_tpl=hot_tpl_complex_attrs_all_attrs,
snippet={'Value': {'get_attr': ['resource1', 'list', 1]}},
expected={'Value': 'bar'},
raises=None
)),
]
@staticmethod
def resolve(snippet, template, stack=None):
return function.resolve(template.parse(stack, snippet))
def test_get_attr_all_attributes(self):
tmpl = template.Template(self.hot_tpl)
stack = parser.Stack(utils.dummy_context(), 'test_get_attr', tmpl)
stack.store()
stack.create()
self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE),
stack.state)
rsrc = stack['resource1']
for action, status in (
(rsrc.CREATE, rsrc.IN_PROGRESS),
(rsrc.CREATE, rsrc.COMPLETE),
(rsrc.RESUME, rsrc.IN_PROGRESS),
(rsrc.RESUME, rsrc.COMPLETE),
(rsrc.SUSPEND, rsrc.IN_PROGRESS),
(rsrc.SUSPEND, rsrc.COMPLETE),
(rsrc.UPDATE, rsrc.IN_PROGRESS),
(rsrc.UPDATE, rsrc.COMPLETE),
(rsrc.SNAPSHOT, rsrc.IN_PROGRESS),
(rsrc.SNAPSHOT, rsrc.COMPLETE),
(rsrc.CHECK, rsrc.IN_PROGRESS),
(rsrc.CHECK, rsrc.COMPLETE),
(rsrc.ADOPT, rsrc.IN_PROGRESS),
(rsrc.ADOPT, rsrc.COMPLETE)):
rsrc.state_set(action, status)
if self.raises is not None:
ex = self.assertRaises(self.raises,
self.resolve, self.snippet, tmpl, stack)
self.assertEqual(self.expected, six.text_type(ex))
else:
self.assertEqual(self.expected,
self.resolve(self.snippet, tmpl, stack))
def test_stack_validate_outputs_get_all_attribute(self):
hot_liberty_tpl = template_format.parse('''
heat_template_version: 2015-10-15
resources:
resource1:
type: GenericResourceType
outputs:
all_attr:
value: {get_attr: [resource1]}
''')
stack = parser.Stack(utils.dummy_context(), 'test_outputs_get_all',
template.Template(hot_liberty_tpl))
stack.validate()
| {
"content_hash": "27641a647bf039814af0c9ab55aff7b1",
"timestamp": "",
"source": "github",
"line_count": 2800,
"max_line_length": 79,
"avg_line_length": 40.36392857142857,
"alnum_prop": 0.5334589759244021,
"repo_name": "cwolferh/heat-scratch",
"id": "b6280b90f0ca6c31fa7304a90ea3d0346dcf3cda",
"size": "113594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_hot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
import sys
import os
import time
import json
from socket import gethostname
from subprocess import call
import logging
from mpi4py import MPI
from jobdescription import JobDescription
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
max_rank = comm.Get_size()
logger = logging.getLogger('Rank {0}'.format(rank))
logger.setLevel(logging.DEBUG)
debug_h = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
debug_h.setFormatter(formatter)
debug_h.setLevel(logging.DEBUG)
error_h = logging.StreamHandler(stream=sys.stderr)
error_h.setFormatter(formatter)
error_h.setLevel(logging.ERROR)
logger.addHandler(error_h)
logger.addHandler(debug_h)
# TODO:
# Input file processing
# - read file attributes from PoolFileCatalog_H.xml
# - check validity of file by checksum
# Get payload run command
# - setup string
# - trf full commandline
# Extract results of execution
# - collect exit code, message
# - extract data from jobreport
def main():
workerAttributesFile = "worker_attributes.json"
eventStatusDumpJsonFile = "event_status.dump.json"
start_g = time.time()
start_g_str = time.asctime(time.localtime(start_g))
hostname = gethostname()
logger.info("Script statrted at {0} on {1}".format(start_g_str, hostname))
# Get a file name with job descriptions
if len(sys.argv) > 1:
input_file = sys.argv[1]
else:
input_file = 'worker_pandaids.json'
try:
in_file = open(input_file)
panda_ids = json.load(in_file)
in_file.close()
except IOError as (errno, strerror):
logger.critical("I/O error({0}): {1}".format(errno, strerror))
logger.critical("Exit from rank")
return errno
logger.debug("Collected list of jobs {0}".format(panda_ids))
logger.error("Only for test")
# PandaID of the job for the command
try:
job_id = panda_ids[rank]
except IndexError:
logger.critical("Pilot has no job: rank {0}".format(rank))
logger.critical("Exit pilot")
return 1
logger.debug("Job [{0}] will be processed".format(job_id))
os.chdir(str(job_id))
try:
job_file = open("HPCJobs.json")
jobs = json.load(job_file)
job_file.close()
except IOError as (errno, strerror):
logger.critical("I/O error({0}): {1}".format(errno, strerror))
logger.critical("Unable to open 'HPCJobs.json'")
return errno
job_dict = jobs[str(job_id)]
my_command = " ".join([job['transformation'],job['jobPars']])
my_command = my_command.strip()
logger.debug("Going to launch: {0}".format(my_command))
payloadstdout = open("stdout.txt", "w")
payloadstderr = open("stderr.txt", "w")
start_time = time.asctime(time.localtime(time.time()))
t0 = os.times()
exit_code = call(my_command, stdout=payloadstdout, stderr=payloadstderr, shell=True)
t1 = os.times()
end_time = time.asctime(time.localtime(time.time()))
t = map(lambda x, y: x - y, t1, t0)
t_tot = reduce(lambda x, y: x + y, t[2:3])
payloadstdout.close()
payloadstderr.close()
logger.info("Payload exit code: {0}".format(exit_code))
logger.info("CPU comsumption time: {0}".format(t_tot))
logger.info("Start time: {0}".format(start_time))
logger.info("End time: {0}".format(end_time))
report = open("rank_report.txt", "w")
report.write("cpuConsumptionTime: %s\n" % t_tot)
report.write("exitCode: %s" % exit_code)
report.close()
logger.info("All done")
return 0
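# Typical launch (illustrative; the file name matches the default read above):
#   mpirun -np 16 python simple_wrapper_mpi.py worker_pandaids.json
# Each MPI rank picks panda_ids[rank] and runs exactly one payload, so -np
# should equal the number of jobs listed in the JSON file.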
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "f508a8e46dee29dfeafca38f9562005b",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 88,
"avg_line_length": 31.25,
"alnum_prop": 0.6565517241379311,
"repo_name": "PanDAWMS/panda-harvester",
"id": "eba9a55cbba10bf58ee23406ff1b7bf1c1e4af4e",
"size": "3647",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandaharvester/harvesterpayload/simple_wrapper_mpi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1650803"
},
{
"name": "Shell",
"bytes": "21117"
}
],
"symlink_target": ""
} |
"""Invokes git bisect to find culprit commit inside Chromium repository."""
from __future__ import print_function
import json
import re
from chromite.cli import flash
from chromite.cros_bisect import git_bisector
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import gs
from chromite.lib import retry_util
REGEX_CROS_VERSION = re.compile(r'[Rr]?(\d+)[.-](\d+)\.(\d+)\.(\d+)$')
class ChromeOnCrosBisector(git_bisector.GitBisector):
"""Bisects offending commit in Chromium repository.
Before bisecting, it extracts metric scores for both the last-known-good and
last-known-bad commits, to verify the regression and to set the threshold that
tells good commits from bad ones. If last-known-good and last-known-bad CrOS
versions are given, it first checks whether the offending commit is in Chrome
by deploying the last-known-bad CrOS version's Chrome to a DUT running the
last-known-good CrOS image. If that combination shows no regression, the
offending commit is not on the Chrome side.
It finds the first bad commit between the given good and bad commits using the
"git bisect" command. For each commit, it uses the builder to build the
package under test, deploys it to the DUT (device under test), and asks the
evaluator to extract the commit's score. A commit is treated as good if its
score is closer to the user-specified good commit's score.
Finally, it outputs the git bisect result.
"""
def __init__(self, options, builder, evaluator):
"""Constructor.
Args:
options: An argparse.Namespace to hold command line arguments. Should
contain:
* cros_flash_retry: Max retry for "cros flash" command.
* cros_flash_sleep: #seconds to wait between retry.
* cros_flash_backoff: backoff factor. Must be >=1. If backoff factor
is 1, sleep_duration = sleep * num_retry. Otherwise,
sleep_duration = sleep * (backoff_factor) ** (num_retry - 1)
builder: Builder to build/deploy image. Should contain repo_dir.
evaluator: Evaluator to get score
"""
super(ChromeOnCrosBisector, self).__init__(options, builder, evaluator)
self.cros_flash_retry = max(0, options.cros_flash_retry)
self.cros_flash_sleep = max(0, options.cros_flash_sleep)
self.cros_flash_backoff = max(1, options.cros_flash_backoff)
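# Worked example of the retry timing documented above (assumed values): with
# cros_flash_sleep=10 and cros_flash_backoff=2, the delays between "cros
# flash" attempts are 10s, 20s, 40s, ... = sleep * backoff ** (num_retry - 1).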
self.good_cros_version = None
self.bad_cros_version = None
self.bisect_between_cros_version = False
if not (git.IsSHA1(options.good, full=False) and
git.IsSHA1(options.bad, full=False)):
# Postpone commit resolution to Run().
self.good_commit = None
self.bad_commit = None
self.good_cros_version = options.good
self.bad_cros_version = options.bad
self.bisect_between_cros_version = True
# Used to access gs://. Lazy initialization.
self.gs_ctx = None
@staticmethod
def CheckCommitFormat(commit):
"""Checks if commit is the acceptable format.
It accepts either SHA1 or CrOS version.
Args:
commit: commit string.
Returns:
Normalized commit. None if the format is unacceptable.
"""
if git_bisector.GitBisector.CheckCommitFormat(commit):
return commit
match_obj = REGEX_CROS_VERSION.match(commit)
if match_obj:
return 'R%s-%s.%s.%s' % match_obj.groups()
return None
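# Illustrative behavior of CheckCommitFormat, derived from the regex above
# (and assuming the base class accepts SHA1 strings, as the constructor's
# git.IsSHA1 check suggests):
#   'R60-9531.0.0' -> 'R60-9531.0.0'   (CrOS version, already normalized)
#   '60-9531.0.0'  -> 'R60-9531.0.0'   ('R' prefix added)
#   'deadbeef'     -> 'deadbeef'       (SHA1 passed through by the base class)
#   'not-a-ref'    -> None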
def ObtainBisectBoundaryScoreImpl(self, good_side):
"""The worker of obtaining score of either last-known-good or bad commit.
Instead of deploying Chrome for good/bad commit, it deploys good/bad
CrOS image if self.bisect_between_cros_version is set.
Args:
good_side: True if it evaluates score for last-known-good. False for
last-known-bad commit.
Returns:
Evaluated score.
"""
commit = self.good_commit if good_side else self.bad_commit
commit_label = 'good' if good_side else 'bad'
# Though bisect_between_cros_version uses archived image directly without
# building Chrome, it is necessary because BuildDeployEval() will update
# self.current_commit.
self.Git(['checkout', commit])
eval_label = None
customize_build_deploy = None
if self.bisect_between_cros_version:
cros_version = (self.good_cros_version if good_side else
self.bad_cros_version)
logging.notice('Obtaining score of %s CrOS version: %s', commit_label,
cros_version)
eval_label = 'cros_%s' % cros_version
customize_build_deploy = lambda: self.FlashCrosImage(
self.GetCrosXbuddyPath(cros_version))
else:
logging.notice('Obtaining score of %s commit: %s', commit_label, commit)
return self.BuildDeployEval(eval_label=eval_label,
customize_build_deploy=customize_build_deploy)
def GetCrosXbuddyPath(self, version):
"""Composes xbuddy path.
Args:
version: CrOS version to get.
Returns:
xbuddy path of the CrOS image for board.
"""
return 'xbuddy://remote/%s/%s/test' % (self.board, version)
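# e.g. (illustrative): GetCrosXbuddyPath('R60-9531.0.0') on a board named
# 'eve' returns 'xbuddy://remote/eve/R60-9531.0.0/test'.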
def ExchangeChromeSanityCheck(self):
"""Exchanges Chrome between good and bad CrOS.
It deploys last-known-good Chrome to last-known-bad CrOS DUT and vice
versa to see if regression culprit is in Chrome.
"""
def FlashBuildDeploy(cros_version):
"""Flashes DUT first then builds/deploys Chrome."""
self.FlashCrosImage(self.GetCrosXbuddyPath(cros_version))
return self.BuildDeploy()
def Evaluate(cros_version, chromium_commit):
self.Git(['checkout', chromium_commit])
score = self.BuildDeployEval(
eval_label='cros_%s_cr_%s' % (cros_version, chromium_commit),
customize_build_deploy=lambda: FlashBuildDeploy(cros_version))
label = self.LabelBuild(score)
logging.notice('Score(mean: %.3f std: %.3f). Marked as %s',
score.mean, score.std, label)
return label
logging.notice('Sanity check: exchange Chrome between good and bad CrOS '
'version.')
# Expect bad result if culprit commit is inside Chrome.
logging.notice('Obtaining score of good CrOS %s with bad Chrome %s',
self.good_cros_version, self.bad_commit)
bad_chrome_on_good_cros_label = Evaluate(self.good_cros_version,
self.bad_commit)
self.current_commit.label = 'good_cros_bad_chrome'
# Expect good result if culprit commit is inside Chrome.
logging.notice('Obtaining score of bad CrOS %s with good Chrome %s',
self.bad_cros_version, self.good_commit)
good_chrome_on_bad_cros_label = Evaluate(self.bad_cros_version,
self.good_commit)
self.current_commit.label = 'bad_cros_good_chrome'
if (bad_chrome_on_good_cros_label != 'bad' or
good_chrome_on_bad_cros_label != 'good'):
logging.error(
'After exchanging Chrome between good/bad CrOS image, found that '
'culprit commit should not be in Chrome repository.')
logging.notice(
'Bisect log:\n' +
'\n'.join(self.CommitInfoToStr(x) for x in self.bisect_log))
return False
return True
def FlashCrosImage(self, xbuddy_path):
"""Flashes CrOS image to DUT.
It returns True when it successfully flashes image to DUT. Raises exception
when it fails after retry.
Args:
xbuddy_path: xbuddy path to CrOS image to flash.
Returns:
True
Raises:
FlashError: An unrecoverable error occurred.
"""
logging.notice('cros flash %s', xbuddy_path)
@retry_util.WithRetry(
self.cros_flash_retry, log_all_retries=True,
sleep=self.cros_flash_sleep,
backoff_factor=self.cros_flash_backoff)
def flash_with_retry():
flash.Flash(self.remote, xbuddy_path, board=self.board,
clobber_stateful=True, disable_rootfs_verification=True)
flash_with_retry()
return True
def CrosVersionToChromeCommit(self, cros_version):
"""Resolves head commit of the Chrome used by the CrOS version.
Args:
cros_version: ChromeOS version, e.g. R60-9531.0.0.
Returns:
Chrome SHA. None if the ChromeOS version is not found.
"""
metadata_url = ('gs://chromeos-image-archive/%s-release/%s/'
'partial-metadata.json') % (self.board, cros_version)
try:
metadata_content = self.gs_ctx.Cat(metadata_url)
except gs.GSCommandError as e:
logging.error('Cannot load %s: %s', metadata_url, e)
return None
try:
metadata = json.loads(metadata_content)
except ValueError:
logging.error('Unable to parse %s', metadata_url)
return None
if (not metadata or 'version' not in metadata or
'chrome' not in metadata['version']):
logging.error('metadata["version"]["chrome"] does not exist in %s',
metadata_url)
return None
chrome_version = metadata['version']['chrome']
# Commit just before the branch point.
# Second line, first field.
result = self.Git(['log', '--oneline', '-n', '2', chrome_version])
if result.returncode != 0:
logging.error('Failed to run "git log %s": error: %s returncode:%s',
chrome_version, result.error, result.returncode)
return None
return result.output.splitlines()[1].split()[0]
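# Illustration (hypothetical output): for `git log --oneline -n 2 60.0.3112.0`
#   aaaaaaa Publish DEPS for Chromium 60.0.3112.0
#   bbbbbbb Incrementing VERSION to 60.0.3112.0   <- second line, first field
# the method returns 'bbbbbbb', the commit just before the branch point.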
def ResolveChromeBisectRangeFromCrosVersion(self):
"""Resolves Chrome bisect range given good and bad CrOS versions.
It sets up self.good_commit and self.bad_commit, which are derived from
self.good_cros_version and self.bad_cros_version, respectively.
Returns:
False if either good_commit or bad_commit failed to resolve. Otherwise,
True.
"""
self.good_commit = self.CrosVersionToChromeCommit(self.good_cros_version)
if self.good_commit:
logging.info('Latest Chrome commit of good CrOS version %s: %s',
self.good_cros_version, self.good_commit)
else:
logging.error('Cannot find metadata for CrOS version: %s',
self.good_cros_version)
return False
self.bad_commit = self.CrosVersionToChromeCommit(self.bad_cros_version)
if self.bad_commit:
logging.info('Latest Chrome commit of bad CrOS version %s: %s',
self.bad_cros_version, self.bad_commit)
else:
logging.error('Cannot find metadata for CrOS version: %s',
self.bad_cros_version)
return False
return True
def PrepareBisect(self):
"""Performs sanity checks and obtains bisect boundary score before bisect.
Returns:
False if there's something wrong.
"""
if self.bisect_between_cros_version:
# Lazy initialization.
self.gs_ctx = gs.GSContext()
self.builder.SyncToHead(fetch_tags=True)
if not self.ResolveChromeBisectRangeFromCrosVersion():
return False
if not (self.SanityCheck() and
self.ObtainBisectBoundaryScore() and
self.GetThresholdFromUser()):
return False
if self.bisect_between_cros_version:
if not self.ExchangeChromeSanityCheck():
return False
return True
| {
"content_hash": "2480192f3bdc8312255662ac230028d8",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 79,
"avg_line_length": 36.78217821782178,
"alnum_prop": 0.6633467922835352,
"repo_name": "endlessm/chromium-browser",
"id": "e34efba675014040b1f9670c744fac1495ccbc57",
"size": "11335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/cros_bisect/chrome_on_cros_bisector.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Analytic policy gradient training."""
import functools
import time
from typing import Callable, Optional, Tuple
from absl import logging
from brax import envs
from brax.envs import wrappers
from brax.training import acting
from brax.training import pmap
from brax.training import types
from brax.training.acme import running_statistics
from brax.training.acme import specs
from brax.training.agents.apg import networks as apg_networks
from brax.training.types import Params
from brax.training.types import PRNGKey
import flax
import jax
import jax.numpy as jnp
import optax
InferenceParams = Tuple[running_statistics.NestedMeanStd, Params]
Metrics = types.Metrics
_PMAP_AXIS_NAME = 'i'
@flax.struct.dataclass
class TrainingState:
"""Contains training state for the learner."""
optimizer_state: optax.OptState
normalizer_params: running_statistics.RunningStatisticsState
policy_params: Params
def _unpmap(v):
return jax.tree_util.tree_map(lambda x: x[0], v)
def train(environment: envs.Env,
episode_length: int,
action_repeat: int = 1,
num_envs: int = 1,
max_devices_per_host: Optional[int] = None,
num_eval_envs: int = 128,
learning_rate: float = 1e-4,
seed: int = 0,
truncation_length: Optional[int] = None,
max_gradient_norm: float = 1e9,
num_evals: int = 1,
normalize_observations: bool = False,
deterministic_eval: bool = False,
network_factory: types.NetworkFactory[
apg_networks.APGNetworks] = apg_networks.make_apg_networks,
progress_fn: Callable[[int, Metrics], None] = lambda *args: None,
eval_env: Optional[envs.Env] = None):
"""Direct trajectory optimization training."""
xt = time.time()
process_count = jax.process_count()
process_id = jax.process_index()
local_device_count = jax.local_device_count()
local_devices_to_use = local_device_count
if max_devices_per_host:
local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
logging.info(
'Device count: %d, process count: %d (id %d), local device count: %d, '
'devices to be used count: %d', jax.device_count(), process_count,
process_id, local_device_count, local_devices_to_use)
device_count = local_devices_to_use * process_count
if truncation_length is not None:
assert truncation_length > 0
num_evals_after_init = max(num_evals - 1, 1)
assert num_envs % device_count == 0
env = environment
env = wrappers.wrap_for_training(
env, episode_length=episode_length, action_repeat=action_repeat)
normalize = lambda x, y: x
if normalize_observations:
normalize = running_statistics.normalize
apg_network = network_factory(
env.observation_size,
env.action_size,
preprocess_observations_fn=normalize)
make_policy = apg_networks.make_inference_fn(apg_network)
optimizer = optax.adam(learning_rate=learning_rate)
def env_step(carry: Tuple[envs.State, PRNGKey], step_index: int,
policy: types.Policy):
env_state, key = carry
key, key_sample = jax.random.split(key)
actions = policy(env_state.obs, key_sample)[0]
nstate = env.step(env_state, actions)
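# Truncated backprop-through-time (descriptive note): at every multiple of
# truncation_length steps, the cond below applies stop_gradient to the state,
# so gradients only flow through windows of at most truncation_length steps.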
if truncation_length is not None:
nstate = jax.lax.cond(
jnp.mod(step_index + 1, truncation_length) == 0.,
jax.lax.stop_gradient, lambda x: x, nstate)
return (nstate, key), (nstate.reward, env_state.obs)
def loss(policy_params, normalizer_params, key):
key_reset, key_scan = jax.random.split(key)
env_state = env.reset(
jax.random.split(key_reset, num_envs // process_count))
f = functools.partial(
env_step, policy=make_policy((normalizer_params, policy_params)))
(rewards,
obs) = jax.lax.scan(f, (env_state, key_scan),
(jnp.array(range(episode_length // action_repeat))))[1]
return -jnp.mean(rewards), obs
loss_grad = jax.grad(loss, has_aux=True)
def clip_by_global_norm(updates):
g_norm = optax.global_norm(updates)
trigger = g_norm < max_gradient_norm
return jax.tree_util.tree_map(
lambda t: jnp.where(trigger, t, (t / g_norm) * max_gradient_norm),
updates)
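# Worked example (illustrative): with max_gradient_norm=1.0 and a gradient
# tree of global norm 4.0, the where-branch above scales every leaf by 1/4,
# giving a clipped global norm of exactly 1.0; smaller gradients pass through
# unchanged.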
def training_epoch(training_state: TrainingState, key: PRNGKey):
key, key_grad = jax.random.split(key)
grad, obs = loss_grad(training_state.policy_params,
training_state.normalizer_params, key_grad)
grad = clip_by_global_norm(grad)
grad = jax.lax.pmean(grad, axis_name=_PMAP_AXIS_NAME)
params_update, optimizer_state = optimizer.update(
grad, training_state.optimizer_state)
policy_params = optax.apply_updates(training_state.policy_params,
params_update)
normalizer_params = running_statistics.update(
training_state.normalizer_params, obs, pmap_axis_name=_PMAP_AXIS_NAME)
metrics = {
'grad_norm': optax.global_norm(grad),
'params_norm': optax.global_norm(policy_params)
}
return TrainingState(
optimizer_state=optimizer_state,
normalizer_params=normalizer_params,
policy_params=policy_params), metrics
training_epoch = jax.pmap(training_epoch, axis_name=_PMAP_AXIS_NAME)
training_walltime = 0
# Note that this is NOT a pure jittable method.
def training_epoch_with_timing(training_state: TrainingState,
key: PRNGKey) -> Tuple[TrainingState, Metrics]:
nonlocal training_walltime
t = time.time()
(training_state, metrics) = training_epoch(training_state, key)
metrics = jax.tree_util.tree_map(jnp.mean, metrics)
jax.tree_util.tree_map(lambda x: x.block_until_ready(), metrics)
epoch_training_time = time.time() - t
training_walltime += epoch_training_time
sps = (episode_length * num_envs) / epoch_training_time
metrics = {
'training/sps': sps,
'training/walltime': training_walltime,
**{f'training/{name}': value for name, value in metrics.items()}
}
return training_state, metrics
key = jax.random.PRNGKey(seed)
global_key, local_key = jax.random.split(key)
del key
local_key = jax.random.fold_in(local_key, process_id)
local_key, eval_key = jax.random.split(local_key)
# The network key should be global, so that networks are initialized the same
# way for different processes.
policy_params = apg_network.policy_network.init(global_key)
del global_key
training_state = TrainingState(
optimizer_state=optimizer.init(policy_params),
policy_params=policy_params,
normalizer_params=running_statistics.init_state(
specs.Array((env.observation_size,), jnp.float32)))
training_state = jax.device_put_replicated(
training_state,
jax.local_devices()[:local_devices_to_use])
if not eval_env:
eval_env = env
else:
eval_env = wrappers.wrap_for_training(
eval_env, episode_length=episode_length, action_repeat=action_repeat)
evaluator = acting.Evaluator(
eval_env,
functools.partial(make_policy, deterministic=deterministic_eval),
num_eval_envs=num_eval_envs,
episode_length=episode_length,
action_repeat=action_repeat,
key=eval_key)
# Run initial eval
if process_id == 0 and num_evals > 1:
metrics = evaluator.run_evaluation(
_unpmap(
(training_state.normalizer_params, training_state.policy_params)),
training_metrics={})
logging.info(metrics)
progress_fn(0, metrics)
for it in range(num_evals_after_init):
logging.info('starting iteration %s %s', it, time.time() - xt)
# optimization
epoch_key, local_key = jax.random.split(local_key)
epoch_keys = jax.random.split(epoch_key, local_devices_to_use)
(training_state,
training_metrics) = training_epoch_with_timing(training_state, epoch_keys)
if process_id == 0:
# Run evals.
metrics = evaluator.run_evaluation(
_unpmap(
(training_state.normalizer_params, training_state.policy_params)),
training_metrics)
logging.info(metrics)
progress_fn(it + 1, metrics)
# If there were no mistakes, the training_state should still be identical on all
# devices.
pmap.assert_is_replicated(training_state)
params = _unpmap(
(training_state.normalizer_params, training_state.policy_params))
pmap.synchronize_hosts()
return (make_policy, params, metrics)
| {
"content_hash": "e57ba51b050ebdba81753c55f285ba96",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 80,
"avg_line_length": 35.28333333333333,
"alnum_prop": 0.6726499763816722,
"repo_name": "google/brax",
"id": "666dd6174e1860596dbe8e61084ce13ae50b2198",
"size": "9050",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "brax/training/agents/apg/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "27572"
},
{
"name": "Jupyter Notebook",
"bytes": "8554172"
},
{
"name": "Python",
"bytes": "1189091"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
import scipy.misc
from io import BytesIO
def save_images(webpage, visuals, image_path, win_size=512):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
if label.startswith('output'):
fulllabel = label
label = 'output'
else:
fulllabel = label
image_name = '%s_%s.jpg' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(fulllabel)
links.append(image_name)
webpage.add_images(ims, txts, links, width=win_size)
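# Hypothetical usage (key names are illustrative):
#   save_images(webpage, OrderedDict(input=img_a, output_1=img_b),
#               ['results/0001.png'])
# writes 0001_input.jpg and 0001_output.jpg into the webpage image dir (any
# key starting with 'output' collapses to the 'output' file name while keeping
# the full key as the caption) and adds one image row to the page.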
| {
"content_hash": "76e87371fd9a7148707a6b44671a49ad",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 60,
"avg_line_length": 25.060606060606062,
"alnum_prop": 0.6723095525997581,
"repo_name": "google/tim-gan",
"id": "117383a0b92d5b2084b9f6d9093f315d7ed4bad1",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "util/visualizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Create a aggregation service.
"""
from twisted.application import service
from twisted.python import usage
from twisted.words.protocols.jabber.jid import JID
from wokkel import component, pubsub
from wokkel.generic import FallbackHandler
from wokkel.iwokkel import IXMPPHandler
from mimir.aggregator import aggregator
class Options(usage.Options):
optParameters = [
('feeds', None, 'feeds', 'Directory that holds the list of feeds'),
('jid', None, 'aggregator', 'JID of this component'),
('secret', None, 'secret', 'Secret to connect to upstream server'),
('rhost', None, '127.0.0.1', 'Upstream server address'),
('rport', None, '5347', 'Upstream server port'),
('service', None, None, 'Publish subscribe service JID'),
('web-port', None, None, 'Port to listen for HTTP interface service'),
]
optFlags = [
('verbose', 'v', 'Show traffic'),
]
def postOptions(self):
try:
self['rport'] = int(self['rport'])
except ValueError:
pass
def makeService(config):
s = service.MultiService()
# create XMPP external component
cs = component.Component(config['rhost'], config['rport'],
config['jid'], config['secret'])
cs.setServiceParent(s)
# wait for no more than 15 minutes to try to reconnect
cs.factory.maxDelay = 900
if config["verbose"]:
cs.logTraffic = True
FallbackHandler().setHandlerParent(cs)
# set up publish-subscribe client handler
publisher = pubsub.PubSubClient()
publisher.setHandlerParent(cs)
# create aggregation service
storage = aggregator.FileFeedStorage(config['feeds'])
ag = aggregator.AggregatorService(storage)
ag.setServiceParent(s)
# set up feed handler from publisher
ag.handler = aggregator.IFeedHandler(publisher)
ag.handler.service = JID(config['service'])
# set up XMPP handler to interface with aggregator
IXMPPHandler(ag).setHandlerParent(cs)
# set up site to interface with aggregator
from twisted.application import internet
from twisted.web2 import channel, resource, server
root = resource.Resource()
root.child_setfeed = aggregator.AddFeedResource(ag)
site = server.Site(root)
w = internet.TCPServer(int(config['web-port']), channel.HTTPFactory(site))
w.setServiceParent(s)
return s
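# Typical deployment (illustrative; the plugin name is an assumption, but the
# flags are the Options defined above):
#   twistd mimir-aggregator --jid=aggregator.example.org --secret=s3cret \
#       --service=pubsub.example.org --web-port=8080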
| {
"content_hash": "5cc30c7c7de09733a7c12c8dc34e8252",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 30.1625,
"alnum_prop": 0.6692913385826772,
"repo_name": "ralphm/mimir",
"id": "c62c94826a52d289a67f33cd6a666ef3ad85dbf8",
"size": "2479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mimir/aggregator/tap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57938"
}
],
"symlink_target": ""
} |
import codecs
import datetime
import json
import os.path
import requests
import subprocess
import time
def generate(data):
team = data['player']['team']['name']
code = data['player']['spotcode']
qr_url = "http://fusiongame.tk/s/%s" % data['player']['touchcode']
pngpath = "spotter_printer/gen/%s.png" % code
svgpath = "spotter_printer/gen/%s.svg" % code
pdfpath = "spotter_printer/gen/%s.pdf" % code
replacements = {
'$playername$': data['player']['name'],
'$code$': data['player']['spotcode'],
'$touchcode$': data['player']['touchcode'],
'$team1$' : "%s %s" % (data['teamScores'][0]['name'], data['teamScores'][0]['score']),
'$team2$' : "%s %s" % (data['teamScores'][1]['name'], data['teamScores'][1]['score']),
'test.png': '%s.png' % code,
'$team$': team,
}
for i, line in enumerate(data['eventlist']):
replacements['$eventlist%s$' % i] = line
for i in range(len(data['eventlist']), 11):
replacements['$eventlist%s$' % i] = ''
replacements['$time$'] = datetime.datetime.now().strftime("%H:%M:%S")
# Read SVG template to memory
openpath = 'spotter_printer/template_portrait.svg'
if data['player']['team']['name'].lower() == 'sinised':
openpath = 'spotter_printer/template_landscape.svg'
f = codecs.open(openpath, 'rb', encoding='utf8')
svg = f.read()
f.close()
# Replace texts in SVG
for marker in replacements:
value = replacements[marker]
svg = svg.replace(marker, str(value))
# Replace events data
# line = 1
# for row in data['lastevents'].split("\n"):
# left, action, right = row.split(">")
# svg = svg.replace("left%d" % line, left)
# svg = svg.replace("right%d" % line, right)
# a = """ xlink:href="#blank"
# id="icon%s"
#""" % (line)
# b = """ xlink:href="#%s"
# id="icon%s"
#""" % (action, line)
# svg = svg.replace(a, b)
# line += 1
# if line > 5:
# break
# Write modified SVG to a new file
f = codecs.open(svgpath, 'wb', encoding='utf8')
f.write(svg)
f.close()
# Generate QR code
subprocess.call('qr "%s" > %s' % (qr_url, pngpath), shell=True)
# Generate PDF
subprocess.call('inkscape --without-gui --export-pdf="%s" "%s"' % (pdfpath, svgpath), shell=True)
return pdfpath
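# Example invocation (hypothetical data; shape inferred from the lookups above):
#   generate({'player': {'name': 'Alice', 'spotcode': 'A1', 'touchcode': 'T9',
#                        'team': {'name': 'Sinised'}},
#             'teamScores': [{'name': 'Sinised', 'score': 3},
#                            {'name': 'Punased', 'score': 2}],
#             'eventlist': []})
# renders the landscape template (team name 'sinised'), generates the QR code
# for http://fusiongame.tk/s/T9, and returns 'spotter_printer/gen/A1.pdf'.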
| {
"content_hash": "56381d5605ea1188fe011582a135bacb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 101,
"avg_line_length": 30.0125,
"alnum_prop": 0.5610162432319866,
"repo_name": "mahfiaz/spotter_irl",
"id": "547536fb7fc8cdb956114dc440dfb87511c33ae0",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotter_printer/svg.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "14045"
},
{
"name": "HTML",
"bytes": "24723"
},
{
"name": "JavaScript",
"bytes": "55397"
},
{
"name": "Python",
"bytes": "103318"
}
],
"symlink_target": ""
} |
from morphforge.morphology.errors import MorphologyFrameworkRegistrationError
class MorphologyExporter(object):
method_name_prefix = 'to'
@classmethod
def register(cls, method_name, export_functor, from_type, allow_override=False):
if not isinstance(method_name, basestring):
raise MorphologyFrameworkRegistrationError("method_name must be a string")
if not method_name.startswith(cls.method_name_prefix):
raise MorphologyFrameworkRegistrationError("method_name must begin with '%s' "% cls.method_name_prefix)
if hasattr(from_type, method_name) and not allow_override:
err = "Existing export functor defined for: %s.%s" % (from_type.__name__, method_name)
err += "(Perhaps use the 'allow_override' parameter on this function-call?)"
raise MorphologyFrameworkRegistrationError(err)
setattr(from_type, method_name, export_functor)
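# Hypothetical usage sketch (names are illustrative, not from morphforge):
#   MorphologyExporter.register('toSWC', to_swc_functor,
#                               from_type=MorphologyTree)
#   morph.toSWC()   # the functor is now bound as a method on MorphologyTree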
| {
"content_hash": "ddfdf13ced68010e2093a02642e998a0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 115,
"avg_line_length": 39.291666666666664,
"alnum_prop": 0.7020148462354189,
"repo_name": "mikehulluk/morphforge",
"id": "8b7dcd470dd628e9987fdb5e96fe3941e25ef150",
"size": "2482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/morphology/exporter/morphologyexporter.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="text", parent_name="layout.xaxis.title", **kwargs):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
**kwargs,
)
| {
"content_hash": "7b95118dfc25068343d1511455bc8d4c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 36.36363636363637,
"alnum_prop": 0.62,
"repo_name": "plotly/plotly.py",
"id": "d1d6cb26336feecd2a7024d2ff2f61cce0b443ce",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/xaxis/title/_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import
import os
import six
import subprocess
from .base import CapPA
from .enums import IS_MAC
class YarnG(CapPA):
def __init__(self, *flags):
super(YarnG, self).__init__(*flags)
self.name = 'yarn'
self.friendly_name = 'yarn'
def _install_package_dict(self, packages):
range_connector_gte = ">="
range_connector_lt = "<"
connector = '@'
manager = self.find_executable()
args = []
if not IS_MAC:
args.append('sudo')
args.append('-E')
args.extend([manager, 'global', 'add'])
for package, version in six.iteritems(packages):
if version is None:
args.append(package)
elif isinstance(version, list):
args.append(package + range_connector_gte + version[0] + ',' + range_connector_lt + version[1])
else:
args.append(package + connector + version)
subprocess.check_call(args, env=os.environ)
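# Illustrative example (assumed inputs, not from the original module):
#   YarnG()._install_package_dict({'typescript': ['4.0.0', '5.0.0'],
#                                  'eslint': None})
# builds and runs roughly (on Linux; the sudo prefix is skipped on macOS):
#   sudo -E yarn global add typescript>=4.0.0,<5.0.0 eslint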
| {
"content_hash": "1344043db4a0fd27fdabd942dc173b6a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 111,
"avg_line_length": 30.02857142857143,
"alnum_prop": 0.570884871550904,
"repo_name": "Captricity/cappa",
"id": "907cbccf9d71e64f8dad9188a7ee27e6fbe6257e",
"size": "1051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cappa/yarng.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40966"
},
{
"name": "Ruby",
"bytes": "5228"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from django.db import models
from django.utils.timezone import now
from categories.models import Category
class Timer(models.Model):
category = models.ForeignKey(Category)
name = models.CharField(
verbose_name='Name',
max_length=100,
)
active = models.BooleanField(
verbose_name='Active',
default=False,
db_index=True,
)
def start(self):
interval = Interval()
interval.timer = self
interval.save()
self.active = True
self.save()
def stop(self):
interval = Interval.objects.get(
timer=self,
end=None,
)
interval.end = now()
interval.save()
self.active = False
self.save()
@property
def today(self):
"""
Today's total time.
"""
today = now()
intervals = Interval.objects.filter(
timer=self,
start__year=today.year,
start__month=today.month,
start__day=today.day,
)
total = timedelta(0)
for interval in intervals:
total += interval.length
return total
@property
def last_week(self):
"""
Last week's total time.
"""
year, week, dow = (now() - timedelta(days=7)).isocalendar()
intervals = Interval.objects.filter(
timer=self,
start__year=year,
week=week,
)
total = timedelta(0)
for interval in intervals:
total += interval.length
return total
class Interval(models.Model):
timer = models.ForeignKey(Timer)
week = models.IntegerField(
verbose_name='Week',
db_index=True,
)
year = models.IntegerField(
verbose_name='Year',
db_index=True,
)
start = models.DateTimeField(
verbose_name='Start Time',
auto_now_add=True,
)
end = models.DateTimeField(
verbose_name='End Time',
blank=True,
null=True,
)
notes = models.CharField(
verbose_name='Notes',
max_length=1000,
blank=True,
null=True,
)
def __init__(self, *args, **kwargs):
# Django re-instantiates models from the database with positional field
# values, so only inject week/year defaults when constructing in code;
# `start` is handled by the field's auto_now_add on save.
if not args:
today = now()
year, week, dow = today.isocalendar()
kwargs.setdefault('week', week)
kwargs.setdefault('year', today.year)
super().__init__(*args, **kwargs)
@property
def length(self):
end = self.end or now()
return end - self.start
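# Illustrative lifecycle (assumed usage, not part of the original module):
#   timer = Timer.objects.create(category=cat, name='Reading')
#   timer.start()   # opens an Interval with end=None and marks timer active
#   timer.stop()    # closes the open Interval; timer.today now includes it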
| {
"content_hash": "b34e7c52fe372f2a0a035c7d03458341",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 67,
"avg_line_length": 21.625,
"alnum_prop": 0.5348747591522158,
"repo_name": "peap/arzeit",
"id": "e23378f9b485a05485bccbcbced971f4d5c034e1",
"size": "2595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timers/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3037"
},
{
"name": "Python",
"bytes": "12887"
}
],
"symlink_target": ""
} |
from django.core.management import BaseCommand
from django.conf import settings
import os, datetime, time
from account.models import PhoneUserProfile
from excel.models import Excel
from django.contrib.auth.models import User
from django.core.files import File
from excel.views import index_excel
from excel.models import CrawlExcel
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print 'Start importing resource sheets...'
crawl_excels = CrawlExcel.objects.filter(imported=False)
print '%s excel files need to be imported' % crawl_excels.count()
for crawl_excel in crawl_excels:
excel = Excel.objects.create(
create_time = crawl_excel.create_time,
user = crawl_excel.crawl_user,
name = os.path.basename(crawl_excel.filepath),
provider = crawl_excel.provider,
status = 0,
excel_file = File(file(crawl_excel.filepath))
)
try:
index_excel(excel)
print 'indexed %s' % excel.id
except Exception:
print 'wrong %s' % excel.id
crawl_excel.imported = True
crawl_excel.save()
| {
"content_hash": "9a4308af39cd293240489ce7e32c313d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 28.902439024390244,
"alnum_prop": 0.6092827004219409,
"repo_name": "stone5495/zebra",
"id": "4ba9ee2b2297147dfd2a756440727ff4a9b277bd",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/excel/management/commands/import_excel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30"
},
{
"name": "HTML",
"bytes": "16980"
},
{
"name": "Python",
"bytes": "87002"
}
],
"symlink_target": ""
} |
from django.db.transaction import atomic
from api import status as scode
from api.fields import get_boolean_value
from api.utils.db import get_listitem
from api.utils.request import set_request_method
from api.exceptions import OperationNotSupported
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.vm.define.utils import is_vm_operational
from api.vm.define.api_views import VmDefineBaseView
from api.vm.define.serializers import VmDefineDiskSerializer
from api.vm.messages import LOG_DISK_CREATE, LOG_DISK_UPDATE, LOG_DISK_DELETE
DISK_ID_MIN = 0
DISK_ID_MAX = 1 # Bug #chili-462
DISK_ID_MAX_BHYVE = 5
DISK_ID_MAX_OS = 1
def _disk_params(fun):
"""Decorator for disk functions below"""
def wrap(view, vm, disk_id, *args, **kwargs):
if disk_id is None and view.diff:
return SuccessTaskResponse(view.request, view.get_diff(vm))
if view.active:
vm.revert_active(json_only=True)
if disk_id is None:
disk = vm.json_get_disks()
disks = None
kwargs['many'] = True
else:
if vm.is_bhyve():
disk_id_max = DISK_ID_MAX_BHYVE
elif vm.is_hvm():
disk_id_max = DISK_ID_MAX
else:
disk_id_max = DISK_ID_MAX_OS
disks, disk = get_listitem(view.request, vm.json_get_disks(), disk_id, name='VM disk',
max_value=disk_id_max, min_value=DISK_ID_MIN)
return fun(view, vm, disk_id, disks, disk, *args, **kwargs)
return wrap
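# Call flow supplied by @_disk_params (illustration, not executed code):
#
#     view.get(vm, disk_id, data)
#       -> with disk_id=None and ?diff set: short-circuits to get_diff(vm)
#       -> otherwise validates disk_id against DISK_ID_MIN..disk_id_max
#          (the max depends on bhyve/HVM/OS zone), fetches (disks, disk)
#          from vm.json_get_disks(), and finally calls the wrapped method
#          as get(vm, disk_id, disks, disk, data)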
class VmDefineDiskView(VmDefineBaseView):
@staticmethod
def _image_tags_inherit(data):
return data is None or get_boolean_value(data.get('image_tags_inherit', True))
def _set_vm_tags(self, vm, tags, task_id=None):
from api.vm.define.vm_define import VmDefineView
request = set_request_method(self.request, 'PUT')
VmDefineView(request).put(vm, {'tags': list(tags)}, task_id=task_id)
def _update_vm_tags(self, vm, img, img_old, data, task_id=None):
if self._image_tags_inherit(data) and (img or img_old) and (img != img_old):
vm_tags = set(vm.tag_list)
vm_tags_new = vm_tags.copy()
if img_old:
img_old_tags = set(img_old.tags)
if img_old_tags.issubset(vm_tags):
vm_tags_new = vm_tags - img_old_tags
if img:
vm_tags_new.update(img.tags)
if vm_tags != vm_tags_new:
self._set_vm_tags(vm, vm_tags_new, task_id=task_id)
def _delete_vm_tags(self, vm, img, data, task_id=None):
if img and self._image_tags_inherit(data):
img_tags, vm_tags = set(img.tags), set(vm.tag_list)
if img_tags and img_tags.issubset(vm_tags):
self._set_vm_tags(vm, list(vm_tags - img_tags), task_id=task_id)
def get_diff(self, vm):
"""Show disk differences between active and in db json. Implies full and denies active vm_define_disk."""
def_current = VmDefineDiskSerializer(self.request, vm, vm.json_get_disks(), disk_id=None, many=True).data
def_active = VmDefineDiskSerializer(self.request, vm, vm.json_active_get_disks(), disk_id=None, many=True).data
return self._diff_lists(def_active, def_current)
# noinspection PyUnusedLocal
@_disk_params
def get(self, vm, disk_id, disks, disk, data, many=False):
"""Get VM disk definition"""
ser = VmDefineDiskSerializer(self.request, vm, disk, disk_id=disk_id, many=many)
return SuccessTaskResponse(self.request, ser.data, vm=vm)
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_disk_params
def post(self, vm, disk_id, disks, disk, data):
"""Create VM nic definition"""
if not vm.is_hvm() and vm.is_deployed():
raise OperationNotSupported
ser = VmDefineDiskSerializer(self.request, vm, disk_id=disk_id, data=data)
if not ser.is_valid():
return FailureTaskResponse(self.request, ser.errors, vm=vm)
disks[disk_id] = ser.jsondata
vm.save_disks(disks, update_node_resources=ser.update_node_resources,
update_storage_resources=ser.update_storage_resources)
res = SuccessTaskResponse(self.request, ser.data, status=scode.HTTP_201_CREATED, vm=vm, msg=LOG_DISK_CREATE,
detail='disk_id=' + str(disk_id + 1), detail_dict=ser.detail_dict())
self._update_vm_tags(vm, ser.img, ser.img_old, data, task_id=res.data.get('task_id'))
return res
@is_vm_operational
@atomic
@_disk_params
def put(self, vm, disk_id, disks, disk, data):
"""Update VM disk definition"""
ser = VmDefineDiskSerializer(self.request, vm, disk.copy(), disk_id=disk_id, data=data, partial=True)
if not ser.is_valid():
return FailureTaskResponse(self.request, ser.errors, vm=vm)
disks[disk_id] = ser.jsondata
vm.save_disks(disks, update_node_resources=ser.update_node_resources,
update_storage_resources=ser.update_storage_resources)
res = SuccessTaskResponse(self.request, ser.data, vm=vm, msg=LOG_DISK_UPDATE,
detail='disk_id=' + str(disk_id + 1), detail_dict=ser.detail_dict())
self._update_vm_tags(vm, ser.img, ser.img_old, data, task_id=res.data.get('task_id'))
return res
# noinspection PyUnusedLocal
@is_vm_operational
@atomic
@_disk_params
def delete(self, vm, disk_id, disks, disk, data):
"""Delete VM disk definition"""
if not vm.is_hvm() and (disk_id == 0 or vm.is_deployed()):
raise OperationNotSupported
ser = VmDefineDiskSerializer(self.request, vm, disk, disk_id=disk_id)
del disks[disk_id]
        ns = ser.get_node_storage(ser.object.get('zpool'), vm.node)
        update_storage_resources = [ns] if ns else []
vm.save_disks(disks, update_node_resources=True, update_storage_resources=update_storage_resources)
res = SuccessTaskResponse(self.request, None, vm=vm, detail='disk_id=' + str(disk_id + 1), msg=LOG_DISK_DELETE)
self._delete_vm_tags(vm, ser.img_old, data, task_id=res.data.get('task_id'))
return res
| {
"content_hash": "5ecb62d970399ab9ef97fdb28862c00a",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 119,
"avg_line_length": 39.1219512195122,
"alnum_prop": 0.6200124688279302,
"repo_name": "erigones/esdc-ce",
"id": "1dbaf091721e7fa9db8af0653642046ad4da3c12",
"size": "6416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/vm/define/vm_define_disk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
import argparse
import os
import locale
from pyp2rpm.convertor import Convertor
from pyp2rpm import settings
def main():
parser = argparse.ArgumentParser(description = 'Convert PyPI package to RPM specfile.')
parser.add_argument('-n',
required = False,
help = 'Name of the package on PyPI (ignored for local files).',
metavar = 'PYPI_NAME')
parser.add_argument('-v',
required = False,
help = 'Version of the package to download (ignored for local files).',
metavar = 'VERSION')
parser.add_argument('-m',
required = False,
help = 'Where to get metadata from ("pypi" or "local", default: "{0}").'.format(settings.DEFAULT_METADATA_SOURCE),
metavar = 'METADATA_SOURCE',
choices = ['pypi', 'local'],
default = settings.DEFAULT_METADATA_SOURCE)
parser.add_argument('-s',
required = False,
help = 'Where to get package from ("pypi" or "/full/path/to/local/file", default: "{0}").'.format(settings.DEFAULT_PKG_SOURCE),
metavar = 'PACKAGE_SOURCE',
default = settings.DEFAULT_PKG_SOURCE)
parser.add_argument('-d',
required = False,
help = 'Where to save the package file (default: "{0}")'.format(settings.DEFAULT_PKG_SAVE_PATH),
metavar = 'SAVE_DIR',
default = settings.DEFAULT_PKG_SAVE_PATH)
parser.add_argument('-r',
required = False,
help = 'Name of rpm package (overrides calculated name)',
metavar = 'RPM_NAME',
default = None)
parser.add_argument('-t',
required = False,
help = 'Template file (jinja2 format) to render (default: "{0}"). Search order is 1) filesystem, 2) default templates.'.format(settings.DEFAULT_TEMPLATE),
metavar = 'TEMPLATE') # no default, because we need to know, whether this was specified or not
parser.add_argument('-o',
required = False,
help = 'Default distro whose conversion rules to use (default: "{0}"). Default templates have their rules associated and ignore this.'.format(settings.DEFAULT_DISTRO),
metavar = 'DISTRO',
default = settings.DEFAULT_DISTRO,
choices = settings.KNOWN_DISTROS)
parser.add_argument('-b',
required = False,
help = 'Base Python version to package for (default: "{0}").'.format(settings.DEFAULT_PYTHON_VERSION),
metavar = 'BASE_PYTHON',
default = settings.DEFAULT_PYTHON_VERSION)
parser.add_argument('-p',
required = False,
help = 'Additional Python versions to include in the specfile (e.g -p3 for %%{?with_python3}). Can be specified multiple times.',
metavar = 'PYTHON_VERSION',
default = [],
action = 'append')
ns = parser.parse_args()
    if ns.n is None and not os.path.exists(ns.s):
        parser.error('You must specify the name of the package (-n) or a full path (-s).')
    distro = ns.o
    if ns.t in settings.KNOWN_DISTROS:
        distro = ns.t
    convertor = Convertor(name = ns.n,
                          version = ns.v,
                          metadata_from = ns.m,
                          source_from = ns.s,
                          save_dir = ns.d,
                          template = ns.t or settings.DEFAULT_TEMPLATE,
                          distro = distro,
                          base_python_version = ns.b,
                          python_versions = ns.p,
                          rpm_name = ns.r)
converted = convertor.convert()
if isinstance(converted, str): # python 3
print(converted)
else: # python 2
print(converted.encode(locale.getpreferredencoding()))
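# Example invocation (assuming the pyp2rpm console script wraps main()):
#
#     pyp2rpm -n Jinja2 -p 3
#
# downloads the package from PyPI and prints the generated specfile to stdout.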
| {
"content_hash": "b0aed461113ca8003f6cbd648cc6ab1d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 191,
"avg_line_length": 51.883720930232556,
"alnum_prop": 0.498655311519498,
"repo_name": "henrysher/spec4pypi",
"id": "a3692d455bc31c6c26f23251e93866992bc6feec",
"size": "4462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyp2rpm/bin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74505"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0012_subscription_purchase_date'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='subscriber',
field=models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "8bc5e34c2320ccaa9f779c93fd46b9e0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 129,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.6679841897233202,
"repo_name": "aehlke/manabi",
"id": "61cad80997c558371c61a4d6f3c1b2bff8880fb7",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manabi/apps/subscriptions/migrations/0013_auto_20190416_0103.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60000"
},
{
"name": "HTML",
"bytes": "287098"
},
{
"name": "JavaScript",
"bytes": "260813"
},
{
"name": "Jinja",
"bytes": "152668"
},
{
"name": "PowerShell",
"bytes": "935"
},
{
"name": "Python",
"bytes": "5129354"
},
{
"name": "Ruby",
"bytes": "5722"
},
{
"name": "SCSS",
"bytes": "25268"
},
{
"name": "Shell",
"bytes": "3041"
}
],
"symlink_target": ""
} |
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from frame_logging/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
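# For example, if frame_logging/__init__.py contains the line
#     __version__ = '0.2.0'
# then get_version('frame_logging', '__init__.py') returns '0.2.0'.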
version = get_version('frame_logging', '__init__.py')
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='frame_logging',
version=version,
description="""Your project description goes here""",
long_description=readme + '\n\n' + history,
author='Romain DA COSTA VIEIRA',
author_email='[email protected]',
url='https://github.com/ItsfBisounours/frame_logging',
packages=[
'frame_logging',
],
include_package_data=True,
install_requires=[],
license="MIT",
zip_safe=False,
keywords='frame_logging',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| {
"content_hash": "de666739ffebf8ac533697015c09e4e0",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 29.91891891891892,
"alnum_prop": 0.6016260162601627,
"repo_name": "ItsfBisounours/frame_logging",
"id": "495ed22f2262cbcba3422991f7a8bf25bc8d953c",
"size": "2260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "14838"
}
],
"symlink_target": ""
} |
import datetime
import pandas as pd
class QuandlFetcher(object):
API_URL = 'http://www.quandl.com/api/v1/'
def __init__(self, auth_token=None):
self.auth_token = auth_token
def _append_query_fields(self, url, **kwargs):
field_values = ['{0}={1}'.format(key, val)
for key, val in kwargs.items() if val]
        return url + 'request_source=python&request_version=2&' + '&'.join(field_values)
def _parse_dates(self, date):
if date is None:
return date
if isinstance(date, datetime.datetime):
return date.date().isoformat()
if isinstance(date, datetime.date):
return date.isoformat()
try:
date = pd.to_datetime(date)
except ValueError:
raise ValueError("{} is not recognised a date.".format(date))
return date.date().isoformat()
    def build_url(self, dataset, **kwargs):
        """Build the Quandl API URL for the requested dataset.
        :param dataset: str or list, depending on single dataset usage or multiset usage
            Dataset codes are available on the Quandl website
        :param str trim_start, trim_end: Optional date filters, otherwise the entire
            dataset is returned
        :param str collapse: Options are daily, weekly, monthly, quarterly, annual
        :param str transformation: options are diff, rdiff, cumul, and normalize
        :param int rows: Number of rows which will be returned
        :param str sort_order: options are asc, desc. Default: `asc`
        :param str text: specify whether to print output text to stdout, pass 'no' to suppress output.
        :returns: str -- the assembled request URL
        Note that Pandas expects timeseries data to be sorted ascending for most
        timeseries functionality to work.
        Any other `kwargs` are sent as field/value params to Quandl
        with no interference.
        """
auth_token = self.auth_token
kwargs.setdefault('sort_order', 'asc')
trim_start = self._parse_dates(kwargs.pop('trim_start', None))
trim_end = self._parse_dates(kwargs.pop('trim_end', None))
        # Check whether dataset is a single code (string) or a multiset (list)
        if isinstance(dataset, (str, unicode)):  # unicode exists on Python 2
            url = self.API_URL + 'datasets/{}.csv?'.format(dataset)
        elif isinstance(dataset, list):
            # Multiset codes use dots instead of slashes, joined by commas
            columns = [d.replace('/', '.') for d in dataset]
            url = self.API_URL + 'multisets.csv?columns=' + ','.join(columns) + '&'
        else:
            raise ValueError("Your dataset must either be specified as a string "
                             "(containing a Quandl code) or a list (of Quandl codes) "
                             "for multisets")
url = self._append_query_fields(
url,
auth_token=auth_token,
trim_start=trim_start,
trim_end=trim_end,
**kwargs
)
return url
def _download(url):
    """
    Download the CSV at `url` into a DataFrame (for use outside of Quantopian).
    """
    dframe = pd.read_csv(url, index_col=0, parse_dates=True)
    return dframe
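# Usage sketch (hypothetical token and dataset code):
#
#     fetcher = QuandlFetcher(auth_token='YOUR_TOKEN')
#     url = fetcher.build_url('WIKI/AAPL', trim_start='2014-01-01',
#                             collapse='monthly')
#     frame = _download(url)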
| {
"content_hash": "1b71c4a42f5e5730782bbedf7c091a6a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 142,
"avg_line_length": 37.60215053763441,
"alnum_prop": 0.5827852444952817,
"repo_name": "humdings/pynance-legacy",
"id": "0d5ff77ebbceb84ab2bbd909b757ee1f34623bc4",
"size": "3498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantopian/quandl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35886"
}
],
"symlink_target": ""
} |
from gearman.client import GearmanClient
client = GearmanClient(['localhost'])
URL = 'http://ifcb-data.whoi.edu/feed.json'
client.submit_job('spider', URL)
| {
"content_hash": "82bccfb3c4b8649ba1ce32eb870b641c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 20,
"alnum_prop": 0.7375,
"repo_name": "hsosik/ifcb-analysis",
"id": "c96dec800a1c78b9b68cc8c896a79cae38eb2832",
"size": "179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IFCB_tools/spider/jsonspider/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "HTML",
"bytes": "39158"
},
{
"name": "JavaScript",
"bytes": "24942"
},
{
"name": "Limbo",
"bytes": "27046"
},
{
"name": "M",
"bytes": "1837"
},
{
"name": "MATLAB",
"bytes": "3235272"
},
{
"name": "Python",
"bytes": "140224"
},
{
"name": "Shell",
"bytes": "5611"
},
{
"name": "TeX",
"bytes": "14370"
},
{
"name": "XSLT",
"bytes": "2600"
}
],
"symlink_target": ""
} |
BASE_QUOTE_CHARS = '\'"'
class CScanSplit:
"""Split a string into words using the specified separator,
but respecting quotes and escape characters.
Special support for / at start of line (to allow /search).
Python rules for " and '.
Result is:
self.words: list of words
self.info: the quoting for the word
self.AsQuoted(): returns word list including quotes
"""
def __init__(self, s, sep=',', escChar='\\', quotes=BASE_QUOTE_CHARS):
self.sep = sep
self.escChar = escChar
self.quotes = quotes
s = s.strip()
wordStart = 0
quote = None
words = []
info = []
pos = 0
escape = False
lastInfo = None
if len(s) > 0 and (s[0] == '/' or s[-1] == '/'):
quoteChars = '%s/' % quotes
if s[0] == '/':
words.append('/')
info.append(lastInfo)
else:
quoteChars = quotes
while pos < len(s):
if s[pos] == escChar:
if len(s) > pos + 1 and s[pos + 1] in quoteChars:
s = s[:pos] + s[pos + 1:] # delete it
escape = True
if s[pos] in sep:
if quote:
pass
else:
words.append(s[wordStart:pos])
info.append(lastInfo)
lastInfo = None
wordStart = pos + 1
if wordStart == len(s): # ends with blank
words.append('')
info.append(lastInfo)
lastInfo = None
wordStart = pos + 1
while wordStart < len(s) and s[wordStart] in sep:
wordStart += 1 # reduce multiple separators to one
pos += 1 # change for double non-whitespace??
elif s[pos] == quote: # matching close
if not escape:
s = s[:pos] + s[pos + 1:] # delete it
quote = None
pos -= 1 # don't want to move on
elif s[pos] in quoteChars and not escape: # open quote or
# other quote in quote
                if quote: # other quote inside a quoted span: treat it literally
                    pass
else: # open quote
quote = s[pos]
lastInfo = quote
s = s[:pos] + s[pos + 1:] # delete it
pos -= 1 # don't want to move on
pos += 1
escape = False
if wordStart < pos:
words.append(s[wordStart:pos])
info.append(lastInfo)
while len(words) > 0 and words[-1] == '': # TODO: shouldn't need this.
words = words[:-1] # Caused by ) etc.
self.words = words
self.info = info
def AsQuoted(self):
return [(('%s%s%s' % (info, word, info)) if info else word)
for word, info in zip(self.words, self.info)]
def AsQuotedOld(self, quote='"'):
return [(('%s%s%s' % (quote, word, quote)) if info else word)
for word, info in zip(self.words, self.info)]
def ExpandTerm(self, n, expansion, leftQuotes='`'):
"""
Replaces the word at position n with the expansion text given.
        If the term being expanded is quoted with anything other than
        one of the quote characters in leftQuotes, the expansion
        text is inserted as a single, unsplit word;
        if the quote character was one of leftQuotes, the expansion is split
        into words.
"""
if self.info[n] and self.info[n] in leftQuotes: # expand
terms = CScanSplit(expansion, self.sep, self.escChar, self.quotes)
self.words = self.words[:n] + terms.words[:] + self.words[n+1:]
self.info = self.info[:n] + terms.info[:] + self.info[n+1:]
else: # no expansion required
self.words[n] = expansion[:]
self.info[n] = None # not left quoted any more!
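# Quick illustration of the parse results:
#
#     cs = CScanSplit('foo "bar baz" qux', sep=' ')
#     cs.words      -> ['foo', 'bar baz', 'qux']
#     cs.info       -> [None, '"', None]
#     cs.AsQuoted() -> ['foo', '"bar baz"', 'qux']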
| {
"content_hash": "3cf2a3c12034f8cb065ac09350197e2e",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 37.85964912280702,
"alnum_prop": 0.4594531974050046,
"repo_name": "njr0/fish",
"id": "280f31ac6e66e08a1095228c2ebd54781db73d68",
"size": "4316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fish/cline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Python",
"bytes": "222206"
},
{
"name": "Shell",
"bytes": "1524"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
import ray
import ray.rllib.ppo as ppo
import ray.rllib.es as es
import ray.rllib.dqn as dqn
import ray.rllib.a3c as a3c
parser = argparse.ArgumentParser(
description=("Train a reinforcement learning agent."))
parser.add_argument("--redis-address", default=None, type=str,
help="The Redis address of the cluster.")
parser.add_argument("--env", required=True, type=str,
help="The gym environment to use.")
parser.add_argument("--alg", required=True, type=str,
help="The reinforcement learning algorithm to use.")
parser.add_argument("--num-iterations", default=sys.maxsize, type=int,
help="The number of training iterations to run.")
parser.add_argument("--config", default="{}", type=str,
help="The configuration options of the algorithm.")
parser.add_argument("--upload-dir", default="file:///tmp/ray", type=str,
help="Where the traces are stored.")
parser.add_argument("--checkpoint-freq", default=sys.maxsize, type=int,
help="How many iterations between checkpoints.")
parser.add_argument("--restore", default="", type=str,
help="If specified, restores state from this checkpoint.")
if __name__ == "__main__":
args = parser.parse_args()
json_config = json.loads(args.config)
ray.init(redis_address=args.redis_address)
def _check_and_update(config, json):
for k in json.keys():
if k not in config:
raise Exception(
"Unknown model config `{}`, all model configs: {}".format(
k, config.keys()))
config.update(json)
env_name = args.env
if args.alg == "PPO":
config = ppo.DEFAULT_CONFIG.copy()
_check_and_update(config, json_config)
alg = ppo.PPOAgent(
env_name, config, upload_dir=args.upload_dir)
elif args.alg == "ES":
config = es.DEFAULT_CONFIG.copy()
_check_and_update(config, json_config)
alg = es.ESAgent(
env_name, config, upload_dir=args.upload_dir)
elif args.alg == "DQN":
config = dqn.DEFAULT_CONFIG.copy()
_check_and_update(config, json_config)
alg = dqn.DQNAgent(
env_name, config, upload_dir=args.upload_dir)
elif args.alg == "A3C":
config = a3c.DEFAULT_CONFIG.copy()
_check_and_update(config, json_config)
alg = a3c.A3CAgent(
env_name, config, upload_dir=args.upload_dir)
else:
assert False, ("Unknown algorithm, check --alg argument. Valid "
"choices are PPO, ES, DQN and A3C.")
result_logger = ray.rllib.common.RLLibLogger(
os.path.join(alg.logdir, "result.json"))
if args.restore:
alg.restore(args.restore)
for i in range(args.num_iterations):
result = alg.train()
# We need to use a custom json serializer class so that NaNs get
# encoded as null as required by Athena.
json.dump(result._asdict(), result_logger,
cls=ray.rllib.common.RLLibEncoder)
result_logger.write("\n")
print("current status: {}".format(result))
if (i + 1) % args.checkpoint_freq == 0:
print("checkpoint path: {}".format(alg.save()))
| {
"content_hash": "8f9b2cb931a877ff561da2115a2e2feb",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 37.365591397849464,
"alnum_prop": 0.6083453237410072,
"repo_name": "alanamarzoev/ray",
"id": "0bfb37efed32e6e4e8df317cedd50cfc4073fce1",
"size": "3498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/rllib/train.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "115351"
},
{
"name": "C++",
"bytes": "678730"
},
{
"name": "CMake",
"bytes": "17015"
},
{
"name": "CSS",
"bytes": "70"
},
{
"name": "HTML",
"bytes": "396"
},
{
"name": "Jupyter Notebook",
"bytes": "2507"
},
{
"name": "Python",
"bytes": "884337"
},
{
"name": "Ruby",
"bytes": "953"
},
{
"name": "Shell",
"bytes": "28965"
}
],
"symlink_target": ""
} |
from fobi.helpers import validate_submit_value_as
from .conf import get_setting
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'select_model_object.settings'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('IGNORED_MODELS', 'SUBMIT_VALUE_AS',)
IGNORED_MODELS = get_setting('IGNORED_MODELS')
SUBMIT_VALUE_AS = get_setting('SUBMIT_VALUE_AS')
validate_submit_value_as(SUBMIT_VALUE_AS)
| {
"content_hash": "4d001f329908b072d1dee8cadd7f8ef5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 60,
"avg_line_length": 32.0625,
"alnum_prop": 0.7037037037037037,
"repo_name": "mansonul/events",
"id": "94922c8b61662123c7221ff75ebb31d3c2f7c96d",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/contrib/plugins/form_elements/fields/select_model_object/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90251"
},
{
"name": "HTML",
"bytes": "186225"
},
{
"name": "JavaScript",
"bytes": "43221"
},
{
"name": "Python",
"bytes": "804726"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
import networkx as nx
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from itertools import product
import random
import math
from functools import wraps
import pandas as pd
from .information import mutual_information
def compose(f, g):
"""
:param f: Second function to apply
:param g: First function to apply
:return: A composition of functions
"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
lmap = compose(list, map)
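# Example: lmap(str.upper, ['a', 'b']) -> ['A', 'B']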
def sigmoid(x):
"""
Logistic sigmoid function
:param x: argument
:return: function value
"""
return 1 / (1 + math.exp(-x))
def extract_node_attribute(graph, name, default=None):
"""
    Extract attributes of networkx graph nodes to a dict.
:param graph: target graph
:param name: name of the attribute
:param default: default value (used if node doesn't have the specified attribute)
:return: a dict of attributes in form of { node_name : attribute }
"""
return { i : d.get(name, default) for i, d in graph.nodes(data=True) }
def extract_edge_attribute(graph, name, default=None):
"""
    Extract attributes of networkx graph edges to a dict.
:param graph: target graph
:param name: name of the attribute
:param default: default value (used if edge doesn't have the specified attribute)
:return: a dict of attributes in form of { (from, to) : attribute }
"""
return { (i, j) : d.get(name, default) for i, j, d in graph.edges(data=True) }
def pretty_draw(graph, node_color=lambda node, attr: '#DDDDDD',
edge_color=lambda node1, node2, attr: '#000000', node_size=lambda node, attr: 300, highres=False):
"""
Draws a graph. You can specify colors of nodes, colors of edges and size of nodes via lambda
functions.
:param graph: target graph
:param node_color: lambda function mapping node name and its attributes to the desired color
:param edge_color: lambda function mapping edge and its attributes to the desired color
:param node_size: lambda function mapping node name and its attributes to the desired size
:return: None
"""
if highres:
fig = plt.figure(figsize=(100, 100))
else:
fig = plt.figure(figsize=(17, 6))
plt.axis('off')
if type(node_color) is str:
node_colors = extract_node_attribute(graph, 'color', default='#DDDDDD')
node_colors = list(map(node_colors.__getitem__, graph.nodes()))
else:
node_colors = list(map(lambda args: node_color(*args), graph.nodes(data=True)))
if type(edge_color) is str:
edge_colors = extract_edge_attribute(graph, 'color', default='#000000')
edge_colors = list(map(edge_colors.__getitem__, graph.edges()))
else:
edge_colors = list(map(lambda args: edge_color(*args), graph.edges(data=True)))
if type(node_size) is str:
        node_sizes = extract_node_attribute(graph, 'size', default=300)
node_sizes = list(map(node_sizes.__getitem__, graph.nodes()))
else:
node_sizes = list(map(lambda args: node_size(*args), graph.nodes(data=True)))
nx.draw_networkx(graph,
with_labels=True,
pos=nx.spring_layout(graph),
node_color=node_colors,
edge_color=edge_colors,
node_size=node_sizes
)
return None
def maximum_spanning_tree(graph, weight='weight'):
"""
Find a maximum spanning tree of a graph
:param graph: target graph
:param weight: edge attribute which will be used as edge weight
:return: maximum spanning tree graph (networkx.Graph)
"""
for i, j in graph.edges():
graph.edge[i][j][weight] = -graph.edge[i][j][weight]
    result = nx.minimum_spanning_tree(graph, weight=weight)
for i, j in graph.edges():
graph.edge[i][j][weight] = -graph.edge[i][j][weight]
return result
def plot_distr_2d(distr, domain=(-25, 25), use_domain=False):
"""
Smart 1d probability distribution plotter. Finds out the interval where the most of probability
mass lies, and plots distribution on it (so you don't need to specify x-axis interval).
:param distr: distribution to plot in (vectorized) form of numpy.array<float> -> numpy.array<float>
:param domain: a superset of plotting interval (to narrow search)
:return: None
"""
if not use_domain:
def binary_search_quantiles(quantile, begin, end, prec):
while end - begin > prec:
sep = (begin + end) / 2.0
#print(sep, sp.integrate.quad(distr, -np.inf, sep)[0])
if sp.integrate.quad(distr, -np.inf, sep)[0] < quantile:
begin = sep
else:
end = sep
return (begin + end) / 2.0
alpha = 0.001
begin = binary_search_quantiles(alpha, domain[0], domain[1], 0.1)
end = binary_search_quantiles(1 - alpha, domain[0], domain[1], 0.1)
if abs(end - begin) < 1e-10:
begin, end = domain
else:
begin, end = domain
x = np.arange(begin, end, (end - begin) / 1000)
try:
plt.plot(x, lmap(distr, x))
    except Exception:
plt.plot(x, lmap(lambda x: distr(np.array(x)), x))
return None
def plot_distr_3d(distr):
"""
Plot 2d probability distribution.
:param distr: the probability distribution to plot in form of [float, float] -> float
:return: None
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.arange(-10, 10, 0.25)
Y = np.arange(-10, 10, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.squeeze(np.array([[distr([X[i][j], Y[i][j]]) for j in range(X.shape[1])] for i in range(X.shape[0])]))
ax.plot_surface(X, Y, Z, color='#DDDDDD')
return None
def plot_distr(distr, dim=1, domain=(-25, 25), use_domain=False):
"""
Smart distribution plotting (whether 1d or 2d).
:param distr: the distribution to plot
:param dim: dimensionality (if known)
:param domain: domain for 1d version
:return: None
"""
if dim == 1:
try:
plot_distr_2d(distr, domain=domain, use_domain=use_domain)
        except Exception:
plot_distr_3d(distr)
else:
plot_distr_3d(distr)
return None
def flip_edge(graph, edge):
"""
Flips an edge in a networkx graph.
:param graph: a target graph
:param edge: edge to flip
:return: None
"""
if graph.has_edge(*edge):
graph.remove_edge(*edge)
else:
graph.add_edge(*edge)
return None
def spoil_graph(graph, p):
"""
'Spoils' a graph: flips every edge with probability p. Doesn't change the original graph.
:param graph: target graph
:param p: flip probability
:return: spoiled graph
"""
graph = graph.copy()
for i in range(len(graph.nodes())):
for j in range(i):
if random.random() < p:
flip_edge(graph, (i, j))
return graph
def reverse_edge(G, edge, copy=False):
"""
Reverse edge in graph.
:param G: target graph
:param edge: target edge
:param copy: if True, copy graph before changing it
:return: graph with reversed edge
"""
if copy:
G = G.copy()
x, y = edge
G.remove_edge(x, y)
G.add_edge(y, x)
return G
def are_equal_graphs(G1, G2):
"""
Check graph equality (equal node names, and equal edges between them).
:param G1: first graph
:param G2: second graph
:return: are they equal
"""
if set(G1.nodes()) != set(G2.nodes()):
return False
return all(map(lambda x: G1.has_edge(*x), G2.edges())) and all(map(lambda x: G2.has_edge(*x), G1.edges()))
def is_subgraph(G1, G2):
"""
Is G1 a subgraph of G2?
:param G1: supposed subgraph
:param G2: graph
:return: is G1 subgraph of G2
"""
return set(G1.edges()).issubset(set(G2.edges()))
def descendants(G, x):
"""
Set of all descendants of node in a graph, not including itself.
:param G: target graph
:param x: target node
:return: set of descendants
"""
return set(nx.dfs_preorder_nodes(G, x)) - {x}
def ancestors(G, x, G_reversed=None):
"""
Set of all ancestors of node in a graph, not including itself.
:param G: target graph
:param x: target node
:param G_reversed: you can supply graph with reversed edges for speedup
:return: set of ancestors
"""
if G_reversed is None:
G_reversed = G.reverse()
return descendants(G_reversed, x)
def reprsort(li):
"""
    sometimes, we need a way to get a unique ordering of any Python objects
so here it is!
(not quite "any" Python objects, but let's hope we'll never deal with that)
"""
extli = list(zip(map(repr, li), range(len(li))))
extli.sort()
return [li[i[1]] for i in extli]
class ListTable(list): # from http://calebmadrigal.com/display-list-as-table-in-ipython-notebook/
"""
Overridden list class which takes a 2-dimensional list of
the form [[1,2,3],[4,5,6]], and renders an HTML Table in
IPython Notebook.
"""
def _repr_html_(self):
html = ["<table>"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
def pretty_print_distr_table(table, names):
"""
Get a ListTable of the distribution specified by `table`, so that it can be
prettily rendered in ipython notebook
:param table: table of the distribution
:param names: names assigned to variables in table
:return: ListTable
"""
table = np.array(table)
t = ListTable()
t.append(names + ['P'])
for v in product(*lmap(compose(list, range), table.shape)):
t.append(list(v) + ["%0.3f" % table[v]])
return t
def pretty_print_distr_dict(d, names):
"""
Get a ListTable of the distribution specified by dict `d`, so that it can be
prettily rendered in ipython notebook.
:param d: dict of the distribution
:param names: names assigned to variables in dict
:return: ListTable
"""
t = ListTable()
t.append(names + ['P'])
items = list(d.items())
try:
items.sort()
except TypeError:
items = reprsort(items)
for v, p in items:
t.append(list(v) + ["%0.3f" % p])
return t
class permutation_dict(dict):
"""
A modification of dict.
    Tuple keys are considered equal if one can be obtained by permuting the other.
For example (1, 3, 2, 0) == (0, 1, 2, 3)
Also, hooks for __getitem__ and __setitem__ are provided.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._phook_setitem_ = lambda key, val: val
self._phook_getitem_ = lambda key, val: val
def __setitem__(self, arg, val):
if isinstance(arg, tuple):
arg = reprsort(list(arg))
arg = tuple(arg)
else:
arg = tuple([arg])
val = self._phook_setitem_(arg, val)
return super().__setitem__(arg, val)
def __getitem__(self, arg):
if isinstance(arg, tuple):
arg = reprsort(list(arg))
arg = tuple(arg)
else:
arg = tuple([arg])
return self._phook_getitem_(arg, super().__getitem__(arg))
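# Usage sketch: keys that are permutations of each other collide.
#
#     d = permutation_dict()
#     d[('b', 'a')] = 1
#     d[('a', 'b')]   -> 1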
def stabilize(alpha):
"""
Decorator which tries to reduce variance of a random function by
averaging it across multiple calls. Function must return a float.
:param alpha: required precision
:return: stabilized function
"""
def stabilize_decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
x = 0.0
current = f(*args, **kwargs)
n = 1
while abs(x - current) > alpha or n == 1:
x = f(*args, **kwargs)
current = (n / (n + 1)) * current + (x / (n + 1))
n += 1
return current
return new_f
return stabilize_decorator
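# Example: average a noisy Monte Carlo estimate of pi until successive
# running means agree to within 0.01 (illustrative only):
#
#     @stabilize(0.01)
#     def noisy_pi():
#         xy = np.random.rand(2, 1000)
#         return 4 * np.mean(np.hypot(xy[0], xy[1]) < 1)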
def relmatrix(f, val1, val2):
"""
A table (2d numpy array) obtained by applying function `f` to different combinations of
values from `val1` and `val2`
:param f: applied function
:param val1: row values
:param val2: col values
    :return: list of lists -- the table, including the header row and column
"""
res = [[''] + list(val2)]
for v1 in val1:
li = [v1]
for v2 in val2:
li.append(f(v1, v2))
res.append(li)
return res
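# Example: relmatrix(lambda a, b: a * b, [1, 2], [3, 4])
#          -> [['', 3, 4], [1, 3, 4], [2, 6, 8]]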
def infotable(data):
"""
Table of pairwise mutual informations between variables in the dataset.
:param data: the dataset
:return: the resulting table
"""
n_var = data.shape[1]
return [[mutual_information(data[:, i1:i1+1], data[:, i2:i2+1]) for i2 in range(n_var)] for i1 in range(n_var)]
def infomatrix(data):
"""
Table of pairwise mutual informations between variables in the dataset in the form of ListTable
:param data: the dataset
:return: the resulting table as ListTable
"""
n_var = data.shape[1]
return ListTable(relmatrix(lambda i1, i2: mutual_information(data[:, i1:i1+1], data[:, i2:i2+1]), range(n_var), range(n_var)))
def colvec(arr):
"""
Transforms a numpy array into a column vector.
    :param arr: target array
:return: column vector -- numpy array of shape (n, 1)
"""
return np.transpose(np.atleast_2d(arr))
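# Example: colvec(np.array([1, 2, 3])).shape -> (3, 1)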
| {
"content_hash": "8913336540a39c61d1833b8de767ae5b",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 130,
"avg_line_length": 32.22857142857143,
"alnum_prop": 0.6030585106382979,
"repo_name": "DLunin/bayescraft",
"id": "04d3b456ea51e04fb94acf0c5f9af49571dd3bca",
"size": "13536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphmodels/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124821"
}
],
"symlink_target": ""
} |
"""Adds total income to all public figures."""
import six
# String to prefix log messages with:
LOG_PREFIX = '[post_process_income] '
INCOME = 'income'
COMPENSATIONS = 'compensations'
OTHER_INCOME = 'other_income'
ALL_INCOMES = (INCOME, COMPENSATIONS, OTHER_INCOME)
SK_TO_EUR = 30.126
CURRENCY_SK = "Sk"
def parse_money_part(money_part):
tokens = money_part.split(" ")
if len(tokens) == 1: # currency is missing -> not a money value
return None
currency = tokens[-1]
# fix spaces between numerals and possible newline characters
value = int("".join(tokens[:-1]).replace("\xa0", ""))
return value, currency
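# Example: parse_money_part('12 345 Sk') -> (12345, 'Sk');
#          parse_money_part('12345') -> None (no currency token present).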
def parse_income_col(val, col):
if not isinstance(val, six.string_types):
return None
parts = val.split(",")
if col == OTHER_INCOME: # the other parts do not contain incomes in this case
parts = parts[:1]
results = []
for part in parts:
money_part = part.split("(")[0].strip() # discard the part in parenthesis
result = parse_money_part(money_part)
if result is not None:
results.append(result)
return results
def parse_income_row(row):
total_income = 0
currencies = []
for i, col in enumerate(ALL_INCOMES):
parsed_incomes = parse_income_col(row[i], col)
if parsed_incomes is None:
continue
for result in parsed_incomes:
total_income += result[0]
currencies.append(result[1])
assert len(set(currencies)) <= 1, "Too many currencies appearing in the row."
currency = currencies[0] if currencies else None
if currency == CURRENCY_SK:
total_income = int(total_income / SK_TO_EUR)
return total_income
def add_incomes(db):
"""Parse and add incomes to assets."""
query = """
SELECT id, {}, {}, {}
FROM assetdeclarations
""".format(*ALL_INCOMES)
incomes = []
with db.get_server_side_cursor(query) as cur:
for row in cur:
eid = row[0]
income = parse_income_row(row[1:])
incomes.append((eid, income))
print('%sAccumulated %d incomes' % (LOG_PREFIX, len(incomes)))
query = "DROP TABLE IF EXISTS incomes"
db.execute(query)
query = """
CREATE TABLE incomes(
id serial PRIMARY KEY,
asset_declaration_id int REFERENCES assetdeclarations(Id) NOT NULL,
income int NOT NULL
);"""
db.execute(query)
with db.cursor() as cur:
q = """
INSERT INTO incomes(asset_declaration_id, income)
VALUES (%s, %s);
"""
cur.executemany(q, incomes)
| {
"content_hash": "a29404bcfb53ea64f0b00f2ec1352449",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 82,
"avg_line_length": 28.52173913043478,
"alnum_prop": 0.6082317073170732,
"repo_name": "verejnedigital/verejne.digital",
"id": "3000ba27f127064b47c961fb69abe5c96131b6e0",
"size": "2624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/prod_generation/post_process_income.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19661"
},
{
"name": "JavaScript",
"bytes": "256025"
},
{
"name": "PHP",
"bytes": "21109"
},
{
"name": "Python",
"bytes": "293787"
},
{
"name": "SCSS",
"bytes": "34305"
},
{
"name": "Shell",
"bytes": "6726"
},
{
"name": "Smarty",
"bytes": "5018"
}
],
"symlink_target": ""
} |
__all__ = ['Distribution']
import re
from distutils.core import Distribution as _Distribution
from setuptools.depends import Require
from setuptools.command.install import install
from setuptools.command.sdist import sdist
from setuptools.command.install_lib import install_lib
from distutils.errors import DistutilsOptionError, DistutilsPlatformError
from distutils.errors import DistutilsSetupError
import setuptools, pkg_resources, distutils.core, distutils.dist, distutils.cmd
import os, distutils.log
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x='+value)
assert not ep.extras
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr,value)
)
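# Example: check_importable(dist, 'x', 'pkg.module:func') passes, while
# 'pkg.module:func [extra]' raises DistutilsSetupError (extras are rejected).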
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value)!=value
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr,value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist,attr,value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"%r is declared as a package namespace, but %r is not:"
" please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k,v in value.items():
list(pkg_resources.parse_requirements(v))
except (TypeError,ValueError,AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
raise DistutilsSetupError(
"%r must be a boolean value (got %r)" % (attr,value)
)
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError,ValueError):
raise DistutilsSetupError(
"%r must be a string or list of strings "
"containing valid project/version requirement specifiers" % (attr,)
)
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value,basestring):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value,dict):
for k,v in value.items():
if not isinstance(k,str): break
try: iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr+" must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
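# Note: the check_* validators above are registered in the
# 'distutils.setup_keywords' entry-point group; Distribution.finalize_options
# (below) iterates that group and calls each validator against the matching
# setup() keyword.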
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' -- a dictionary mapping option names to 'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
    def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self,'dependency_links',self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs.pop('setup_requires'))
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
if not hasattr(self,ep.name):
setattr(self,ep.name,None)
_Distribution.__init__(self,attrs)
if isinstance(self.metadata.version, (int,long,float)):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
from pkg_resources import working_set, parse_requirements
for dist in working_set.resolve(
parse_requirements(requires), installer=self.fetch_build_egg
):
working_set.add(dist)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self,ep.name,None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args':['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in opts.keys():
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
cmd = easy_install(
dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report = True
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name,feature in self.features.items():
self._set_feature(name,None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef=''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-'+name, None, 'include '+descr+incdef))
go.append(('without-'+name, None, 'exclude '+descr+excdef))
no['without-'+name] = 'with-'+name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
cmdclass = ep.load(False) # don't require extras, we're not running
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
        For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
        for pkg in packages:
            self.exclude_package(pkg)
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd,opts in self.command_options.items():
for opt,(src,val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_','-')
if val==0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj,'negative_opt',{}))
for neg,pos in neg_opt.items():
if pos==opt:
opt=neg
val=None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val==1:
val = None
d.setdefault(cmd,{})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
# Install it throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = Distribution
class Feature:
"""A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras
):
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features,(str,Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r,str)
]
er = [r for r in require_features if not isinstance(r,str)]
if er: extras['require_features'] = er
if isinstance(remove,str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or at least one"
" of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self,dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description+" is required,"
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self,dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self,dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
def check_packages(dist, attr, value):
for pkgname in value:
        if not re.match(r'\w+(\.\w+)*$', pkgname):
            distutils.log.warn(
                "WARNING: %r not a valid package name; please use only "
                ".-separated package names in setup.py", pkgname
)
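# Hedged usage sketch (added for illustration; the project and feature names
# are invented -- only 'setup' and 'Feature' are the real entry points). This
# is how a setup.py might wire up the Feature machinery defined above.
def _example_feature_setup():
    from setuptools import setup, Feature
    setup(
        name="example-dist",
        version="0.1",
        packages=["example", "example.tests"],
        features={
            # Toggled on the command line with --with-tests/--without-tests;
            # included by default because standard=True and it is available.
            "tests": Feature(
                "the test suite",
                standard=True,
                remove=["example.tests"],
            ),
        },
    )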
| {
"content_hash": "3ff30fe5a25413f099a4bd8e6e863d93",
"timestamp": "",
"source": "github",
"line_count": 817,
"max_line_length": 97,
"avg_line_length": 38.09791921664627,
"alnum_prop": 0.5964788279894622,
"repo_name": "diego-d5000/MisValesMd",
"id": "5c17b5e83ac4f1a46e42f2c11f14a566beefd627",
"size": "31126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/dist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
} |
from distutils.core import setup
VERSION = 'v0.2'
setup(
name='pydialog',
version=VERSION,
author='Nic Roland',
author_email='[email protected]',
packages=['pydialog'],
description="Command line dialogs made easy.",
url = 'https://github.com/nicr9/pydialog',
download_url = 'https://github.com/nicr9/pydialog/tarball/%s' % VERSION,
license="MIT",
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: User Interfaces',
],
)
| {
"content_hash": "1ea04febc209c77afc06aa3327788f63",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 36.38461538461539,
"alnum_prop": 0.5560253699788583,
"repo_name": "nicr9/pydialog",
"id": "b9daa883cdec2a1615a6fb5446cc0231e02f0dd9",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4071"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys
# Pull out navbar from custom index.html page
f = open('/srv/projects/intro_programming/intro_programming/html_resources/index.html', 'r')
lines = f.readlines()
f.close()
navbar_string = ''
in_navbar = False
num_open_divs = 0
num_closed_divs = 0
for line in lines:
# Navbar is in first div for now, so at first div set True.
# Could start from 'Fixed navbar'
if '<div' in line:
in_navbar = True
num_open_divs += 1
if '</div' in line:
num_closed_divs += 1
if in_navbar:
navbar_string += line
if num_open_divs > 0 and num_open_divs == num_closed_divs:
in_navbar = False
break
# jquery is included in the header of each page, so I can use it elsewhere
# ie for toggling output and exercises.
# <script src="js/jquery.js"></script>
final_js_string = """
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="js/bootstrap.min.js"></script>
"""
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# Insert navbar into each file, right after opening body tag.
# Then insert required js library at end of file.
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
if '<body>' in line:
f.write(line.encode('utf-8'))
f.write(navbar_string.encode('utf-8'))
f.write("\n\n".encode('utf-8'))
elif '</body>' in line:
f.write(final_js_string.encode('utf-8'))
f.write(line.encode('utf-8'))
else:
f.write(line.encode('utf-8'))
f.close()
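# Hedged refactor sketch (added, not part of the original script): the same
# div-balance logic as the loop above, packaged as a function so it can be
# tested on its own. Same assumption as the script: the navbar is the first
# <div> in the file and tags sit on their own lines.
def extract_first_div(lines):
    captured = []
    opened = closed = 0
    for line in lines:
        if '<div' in line:
            opened += 1
        if '</div' in line:
            closed += 1
        if opened:
            captured.append(line)
        if opened and opened == closed:
            break
    return ''.join(captured)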
| {
"content_hash": "9952da0b816467cfe537ae726d60b3d3",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 92,
"avg_line_length": 29.735294117647058,
"alnum_prop": 0.6097922848664689,
"repo_name": "chenleo/ipynotebook",
"id": "fc58b17602744d35643a5589873a829188bb4e95",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/add_bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "123981"
},
{
"name": "CSS",
"bytes": "4234"
},
{
"name": "JavaScript",
"bytes": "1647"
},
{
"name": "Python",
"bytes": "36163"
},
{
"name": "Shell",
"bytes": "19027"
},
{
"name": "Tcl",
"bytes": "411"
}
],
"symlink_target": ""
} |
import io
import json
import os
import pickle
import tempfile
import uuid
from django.core.management import call_command
from django.test import TestCase
from django.test import TransactionTestCase
from mock import call
from mock import MagicMock
from mock import Mock
from mock import patch
from sqlalchemy import create_engine
from .sqlalchemytesting import django_connection_engine
from .test_content_app import ContentNodeTestBase
from kolibri.core.content import models as content
from kolibri.core.content.constants.schema_versions import CONTENT_SCHEMA_VERSION
from kolibri.core.content.constants.schema_versions import NO_VERSION
from kolibri.core.content.constants.schema_versions import V020BETA1
from kolibri.core.content.constants.schema_versions import V040BETA3
from kolibri.core.content.constants.schema_versions import VERSION_1
from kolibri.core.content.constants.schema_versions import VERSION_2
from kolibri.core.content.constants.schema_versions import VERSION_3
from kolibri.core.content.models import AssessmentMetaData
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.annotation import recurse_annotation_up_tree
from kolibri.core.content.utils.annotation import (
set_leaf_node_availability_from_local_file_availability,
)
from kolibri.core.content.utils.annotation import update_content_metadata
from kolibri.core.content.utils.channel_import import ChannelImport
from kolibri.core.content.utils.channel_import import import_channel_from_local_db
from kolibri.core.content.utils.sqlalchemybridge import get_default_db_string
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch("kolibri.core.content.utils.channel_import.ChannelImport.find_unique_tree_id")
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassConstructorTestCase(TestCase):
"""
Testcase for the base channel import class constructor
"""
def test_channel_id(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
self.assertEqual(channel_import.channel_id, "test")
@patch("kolibri.core.content.utils.channel_import.get_content_database_file_path")
def test_two_bridges(self, db_path_mock, apps_mock, tree_id_mock, BridgeMock):
db_path_mock.return_value = "test"
ChannelImport("test")
BridgeMock.assert_has_calls(
[
call(sqlite_file_path="test"),
call(app_name="content", schema_version=CONTENT_SCHEMA_VERSION),
]
)
@patch("kolibri.core.content.utils.channel_import.get_content_database_file_path")
def test_get_config(self, db_path_mock, apps_mock, tree_id_mock, BridgeMock):
ChannelImport("test")
apps_mock.assert_has_calls(
[
call.get_app_config("content"),
call.get_app_config().get_models(include_auto_created=True),
]
)
def test_tree_id(self, apps_mock, tree_id_mock, BridgeMock):
ChannelImport("test")
tree_id_mock.assert_called_once_with()
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch(
"kolibri.core.content.utils.channel_import.ChannelImport.get_all_destination_tree_ids"
)
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassMethodUniqueTreeIdTestCase(TestCase):
"""
Testcase for the base channel import class unique tree id generator
"""
def test_empty(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = []
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 1)
def test_one_one(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 2)
def test_one_two(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [2]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 1)
def test_two_one_two(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 2]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 3)
def test_two_one_three(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 3]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 2)
def test_three_one_two_three(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 2, 3]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 4)
def test_three_one_two_four(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 2, 4]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 3)
def test_three_one_three_four(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 3, 4]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 2)
def test_three_one_three_five(self, apps_mock, tree_ids_mock, BridgeMock):
tree_ids_mock.return_value = [1, 3, 5]
channel_import = ChannelImport("test")
self.assertEqual(channel_import.find_unique_tree_id(), 2)
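# Hedged reference sketch (added; not from the Kolibri source): the behaviour
# pinned down by the assertions above is "smallest positive integer not
# already used as a tree_id". A minimal implementation consistent with those
# tests:
def _find_unique_tree_id(existing_ids):
    taken = set(existing_ids)
    candidate = 1
    while candidate in taken:
        candidate += 1
    return candidate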
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch("kolibri.core.content.utils.channel_import.ChannelImport.find_unique_tree_id")
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassGenRowMapperTestCase(TestCase):
"""
Testcase for the base channel import class row mapper generator
"""
def test_base_mapper(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
mapper = channel_import.generate_row_mapper()
record = MagicMock()
record.test_attr = "test_val"
self.assertEqual(mapper(record, "test_attr"), "test_val")
def test_column_name_mapping(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
mappings = {"test_attr": "test_attr_mapped"}
mapper = channel_import.generate_row_mapper(mappings=mappings)
record = MagicMock()
record.test_attr_mapped = "test_val"
self.assertEqual(mapper(record, "test_attr"), "test_val")
def test_method_mapping(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
mappings = {"test_attr": "test_map_method"}
mapper = channel_import.generate_row_mapper(mappings=mappings)
record = {}
test_map_method = Mock()
test_map_method.return_value = "test_val"
channel_import.test_map_method = test_map_method
self.assertEqual(mapper(record, "test_attr"), "test_val")
def test_no_column_mapping(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
mappings = {"test_attr": "test_attr_mapped"}
mapper = channel_import.generate_row_mapper(mappings=mappings)
record = Mock(spec=["test_attr"])
with self.assertRaises(AttributeError):
mapper(record, "test_attr")
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch("kolibri.core.content.utils.channel_import.ChannelImport.find_unique_tree_id")
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassGenTableMapperTestCase(TestCase):
"""
Testcase for the base channel import class table mapper generator
"""
def test_base_mapper(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
mapper = channel_import.generate_table_mapper()
self.assertEqual(mapper, channel_import.base_table_mapper)
def test_method_mapping(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
table_map = "test_map_method"
test_map_method = Mock()
channel_import.test_map_method = test_map_method
mapper = channel_import.generate_table_mapper(table_map=table_map)
self.assertEqual(mapper, test_map_method)
def test_no_column_mapping(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
table_map = "test_map_method"
with self.assertRaises(AttributeError):
channel_import.generate_table_mapper(table_map=table_map)
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch("kolibri.core.content.utils.channel_import.ChannelImport.find_unique_tree_id")
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassTableImportTestCase(TestCase):
"""
Testcase for the base channel import class table import method
"""
def test_no_models_unflushed_rows_passed_through(
self, apps_mock, tree_id_mock, BridgeMock
):
channel_import = ChannelImport("test")
record_mock = MagicMock(spec=["__table__"])
channel_import.destination.get_class.return_value = record_mock
self.assertEqual(
0,
channel_import.table_import(
MagicMock(), lambda x, y: None, lambda x: [], 0
),
)
def test_no_merge_records_bulk_insert_no_flush(
self, apps_mock, tree_id_mock, BridgeMock
):
channel_import = ChannelImport("test")
record_mock = MagicMock(spec=["__table__"])
record_mock.__table__.columns.items.return_value = [("test_attr", MagicMock())]
channel_import.destination.get_class.return_value = record_mock
channel_import.table_import(
MagicMock(), lambda x, y: "test_val", lambda x: [{}] * 100, 0
)
channel_import.destination.session.flush.assert_not_called()
def test_no_merge_records_bulk_insert_flush(
self, apps_mock, tree_id_mock, BridgeMock
):
channel_import = ChannelImport("test")
record_mock = MagicMock(spec=["__table__"])
record_mock.__table__.columns.items.return_value = [("test_attr", MagicMock())]
channel_import.destination.get_class.return_value = record_mock
channel_import.table_import(
MagicMock(), lambda x, y: "test_val", lambda x: [{}] * 10000, 0
)
channel_import.destination.session.flush.assert_called_once_with()
@patch("kolibri.core.content.utils.channel_import.merge_models", new=[])
def test_merge_records_merge_no_flush(self, apps_mock, tree_id_mock, BridgeMock):
from kolibri.core.content.utils.channel_import import merge_models
channel_import = ChannelImport("test")
record_mock = MagicMock(spec=["__table__"])
record_mock.__table__.columns.items.return_value = [("test_attr", MagicMock())]
channel_import.destination.get_class.return_value = record_mock
model_mock = MagicMock()
model_mock._meta.pk.name = "test_attr"
merge_models.append(model_mock)
channel_import.merge_record = Mock()
channel_import.table_import(
model_mock, lambda x, y: "test_val", lambda x: [{}] * 100, 0
)
channel_import.destination.session.flush.assert_not_called()
@patch("kolibri.core.content.utils.channel_import.merge_models", new=[])
def test_merge_records_merge_flush(self, apps_mock, tree_id_mock, BridgeMock):
from kolibri.core.content.utils.channel_import import merge_models
channel_import = ChannelImport("test")
record_mock = Mock(spec=["__table__"])
record_mock.__table__.columns.items.return_value = [("test_attr", MagicMock())]
channel_import.destination.get_class.return_value = record_mock
model_mock = Mock()
model_mock._meta.pk.name = "test_attr"
merge_models.append(model_mock)
channel_import.merge_record = Mock()
channel_import.table_import(
model_mock, lambda x, y: "test_val", lambda x: [{}] * 10000, 0
)
channel_import.destination.session.flush.assert_called_once_with()
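# Hedged sketch (added): the flush pattern asserted above suggests rows are
# buffered and the session flushed once the number of unflushed rows crosses
# a threshold around 10000; schematically, with invented names:
def _table_import_sketch(session, rows, unflushed=0, threshold=10000):
    for row in rows:
        # (the real method stages a bulk insert or a per-record merge here)
        unflushed += 1
        if unflushed >= threshold:
            session.flush()
            unflushed = 0
    return unflushed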
@patch("kolibri.core.content.utils.channel_import.Bridge")
@patch("kolibri.core.content.utils.channel_import.ChannelImport.find_unique_tree_id")
@patch("kolibri.core.content.utils.channel_import.apps")
class BaseChannelImportClassOtherMethodsTestCase(TestCase):
"""
Testcase for the base channel import class remaining methods
"""
def test_import_channel_methods_called(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
model_mock = Mock(spec=["__name__"])
channel_import.content_models = [model_mock]
mapping_mock = Mock()
channel_import.schema_mapping = {model_mock: mapping_mock}
with patch.object(channel_import, "generate_row_mapper"), patch.object(
channel_import, "generate_table_mapper"
), patch.object(channel_import, "table_import"), patch.object(
channel_import, "check_and_delete_existing_channel"
):
channel_import.import_channel_data()
channel_import.generate_row_mapper.assert_called_once_with(
mapping_mock.get("per_row")
)
channel_import.generate_table_mapper.assert_called_once_with(
mapping_mock.get("per_table")
)
channel_import.table_import.assert_called_once()
channel_import.check_and_delete_existing_channel.assert_called_once()
channel_import.destination.session.commit.assert_called_once_with()
def test_end(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
channel_import.end()
channel_import.destination.end.assert_has_calls([call(), call()])
def test_destination_tree_ids(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
class_mock = Mock()
channel_import.destination.get_class.return_value = class_mock
channel_import.get_all_destination_tree_ids()
channel_import.destination.assert_has_calls(
[
call.session.query(class_mock.tree_id),
call.session.query().distinct(),
call.session.query().distinct().all(),
]
)
def test_base_table_mapper(self, apps_mock, tree_id_mock, BridgeMock):
channel_import = ChannelImport("test")
class_mock = Mock()
        list(channel_import.base_table_mapper(class_mock))  # consume the generator
channel_import.destination.assert_has_calls(
[call.session.query(class_mock), call.session.query().all()]
)
class MaliciousDatabaseTestCase(TestCase):
@patch("kolibri.core.content.utils.channel_import.initialize_import_manager")
def test_non_existent_root_node(self, initialize_manager_mock):
import_mock = MagicMock()
initialize_manager_mock.return_value = import_mock
channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def create_channel():
ChannelMetadata.objects.create(
id=channel_id, name="test", min_schema_version="1", root_id=channel_id
)
import_mock.import_channel_data.side_effect = create_channel
import_channel_from_local_db(channel_id)
try:
channel = ChannelMetadata.objects.get(id=channel_id)
assert channel.root
except ContentNode.DoesNotExist:
self.fail("Channel imported without a valid root node")
SCHEMA_PATH_TEMPLATE = os.path.join(
os.path.dirname(__file__), "../fixtures/{name}_content_schema"
)
DATA_PATH_TEMPLATE = os.path.join(
os.path.dirname(__file__), "../fixtures/{name}_content_data.json"
)
class ContentImportTestBase(TransactionTestCase):
"""This is run using a TransactionTestCase,
as by default, Django runs each test inside an atomic context in order to easily roll back
any changes to the DB. However, as we are setting things in the Django DB using SQLAlchemy
these changes are not caught by this atomic context, and data will persist across tests.
    In order to deal with this, we call an explicit db flush at the end of every test case;
    both this flush and the SQLAlchemy insertions can cause issues with the atomic context used
by Django."""
@property
def schema_name(self):
return self.legacy_schema or self.name
@property
def data_name(self):
return self.legacy_schema or self.name
def setUp(self):
try:
self.set_content_fixture()
except (IOError, EOFError):
print(
"No content schema and/or data for {name}".format(name=self.schema_name)
)
super(ContentImportTestBase, self).setUp()
@patch("kolibri.core.content.utils.channel_import.get_content_database_file_path")
def set_content_fixture(self, db_path_mock):
_, self.content_db_path = tempfile.mkstemp(suffix=".sqlite3")
db_path_mock.return_value = self.content_db_path
self.content_engine = create_engine(
"sqlite:///" + self.content_db_path, convert_unicode=True
)
with open(SCHEMA_PATH_TEMPLATE.format(name=self.schema_name), "rb") as f:
metadata = pickle.load(f)
data_path = DATA_PATH_TEMPLATE.format(name=self.data_name)
with io.open(data_path, mode="r", encoding="utf-8") as f:
data = json.load(f)
metadata.bind = self.content_engine
metadata.create_all()
conn = self.content_engine.connect()
# Write data for each fixture into the table
for table in metadata.sorted_tables:
if data[table.name]:
conn.execute(table.insert(), data[table.name])
conn.close()
with patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
new=self.get_engine,
):
import_channel_from_local_db("6199dde695db4ee4ab392222d5af1e5c")
update_content_metadata("6199dde695db4ee4ab392222d5af1e5c")
def get_engine(self, connection_string):
if connection_string == get_default_db_string():
return django_connection_engine()
return self.content_engine
def tearDown(self):
call_command("flush", interactive=False)
super(ContentImportTestBase, self).tearDown()
@classmethod
def tearDownClass(cls):
django_connection_engine().dispose()
super(ContentImportTestBase, cls).tearDownClass()
class NaiveImportTestCase(ContentNodeTestBase, ContentImportTestBase):
"""
Integration test for naive import
"""
# When incrementing content schema versions, this should be incremented to the new version
# A new TestCase for importing for this old version should then be subclassed from this TestCase
# See 'NoVersionImportTestCase' below for an example
name = CONTENT_SCHEMA_VERSION
legacy_schema = None
def test_no_update_old_version(self):
channel = ChannelMetadata.objects.first()
channel.version += 1
channel_version = channel.version
channel.save()
self.set_content_fixture()
channel.refresh_from_db()
self.assertEqual(channel.version, channel_version)
def test_localfile_available_remain_after_import(self):
local_file = LocalFile.objects.get(pk="9f9438fe6b0d42dd8e913d7d04cfb2b2")
local_file.available = True
local_file.save()
self.set_content_fixture()
local_file.refresh_from_db()
self.assertTrue(local_file.available)
def residual_object_deleted(self, Model):
# Checks that objects previously associated with a channel are deleted on channel upgrade
obj = Model.objects.first()
# older databases may not import data for all models so if this is None, ignore
if obj is not None:
# Set id to a new UUID so that it does an insert at save
obj.id = uuid.uuid4().hex
obj.save()
obj_id = obj.id
channel = ChannelMetadata.objects.first()
# Decrement current channel version to ensure reimport
channel.version -= 1
channel.save()
self.set_content_fixture()
with self.assertRaises(Model.DoesNotExist):
assert Model.objects.get(pk=obj_id)
def test_residual_file_deleted_after_reimport(self):
self.residual_object_deleted(File)
def test_residual_assessmentmetadata_deleted_after_reimport(self):
self.residual_object_deleted(AssessmentMetaData)
def test_residual_contentnode_deleted_after_reimport(self):
root_node = ChannelMetadata.objects.first().root
obj = ContentNode.objects.create(
title="test",
id=uuid.uuid4().hex,
parent=root_node,
content_id=uuid.uuid4().hex,
channel_id=root_node.channel_id,
)
obj_id = obj.id
channel = ChannelMetadata.objects.first()
# Decrement current channel version to ensure reimport
channel.version -= 1
channel.save()
self.set_content_fixture()
with self.assertRaises(ContentNode.DoesNotExist):
assert ContentNode.objects.get(pk=obj_id)
def test_existing_localfiles_are_not_overwritten(self):
with patch(
"kolibri.core.content.utils.sqlalchemybridge.get_engine",
new=self.get_engine,
):
channel_id = "6199dde695db4ee4ab392222d5af1e5c"
channel = ChannelMetadata.objects.get(id=channel_id)
# mark LocalFile objects as available
for f in channel.root.children.first().files.all():
f.local_file.available = True
f.local_file.save()
# channel's not yet available, as we haven't done the annotation
assert not channel.root.available
# propagate availability up the tree
set_leaf_node_availability_from_local_file_availability(channel_id)
recurse_annotation_up_tree(channel_id=channel_id)
# after reloading, channel should now be available
channel.root.refresh_from_db()
assert channel.root.available
# set the channel version to a low number to ensure we trigger a re-import of metadata
ChannelMetadata.objects.filter(id=channel_id).update(version=-1)
# reimport the metadata
self.set_content_fixture()
# after reloading, the files and their ancestor ContentNodes should all still be available
channel.root.refresh_from_db()
assert channel.root.available
assert channel.root.children.first().files.all()[0].local_file.available
class ImportLongDescriptionsTestCase(ContentImportTestBase, TransactionTestCase):
"""
    When using Postgres, char limits on fields are enforced strictly. This was causing errors on import, as described in:
https://github.com/learningequality/kolibri/issues/3600
"""
name = CONTENT_SCHEMA_VERSION
legacy_schema = None
data_name = "longdescriptions"
longdescription = "soverylong" * 45
def test_long_descriptions(self):
self.assertEqual(
ContentNode.objects.get(
id="32a941fb77c2576e8f6b294cde4c3b0c"
).license_description,
self.longdescription,
)
self.assertEqual(
ContentNode.objects.get(id="2e8bac07947855369fe2d77642dfc870").description,
self.longdescription,
)
class Version3ImportTestCase(NaiveImportTestCase):
"""
    Integration test for import from content schema version 3
"""
name = VERSION_3
@classmethod
def tearDownClass(cls):
super(Version3ImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(Version3ImportTestCase, cls).setUpClass()
class Version2ImportTestCase(NaiveImportTestCase):
"""
    Integration test for import from content schema version 2
"""
name = VERSION_2
@classmethod
def tearDownClass(cls):
super(Version2ImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(Version2ImportTestCase, cls).setUpClass()
class Version1ImportTestCase(NaiveImportTestCase):
"""
    Integration test for import from content schema version 1
"""
name = VERSION_1
@classmethod
def tearDownClass(cls):
super(Version1ImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(Version1ImportTestCase, cls).setUpClass()
class NoVersionImportTestCase(NaiveImportTestCase):
"""
    Integration test for import from a database with no schema version
"""
name = NO_VERSION
@classmethod
def tearDownClass(cls):
super(NoVersionImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(NoVersionImportTestCase, cls).setUpClass()
class NoVersionv020ImportTestCase(NoVersionImportTestCase):
"""
    Integration test for import from a database with no schema version,
    using legacy schema 0.2.0beta1
"""
legacy_schema = V020BETA1
def test_lang_str(self):
# test for Language __str__
p = content.Language.objects.get(lang_code="en")
self.assertEqual(str(p), "")
@classmethod
def tearDownClass(cls):
super(NoVersionv020ImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(NoVersionv020ImportTestCase, cls).setUpClass()
class NoVersionv040ImportTestCase(NoVersionv020ImportTestCase):
"""
    Integration test for import from a database with no schema version,
    using legacy schema 0.4.0beta3
"""
legacy_schema = V040BETA3
@classmethod
def tearDownClass(cls):
super(NoVersionv020ImportTestCase, cls).tearDownClass()
@classmethod
def setUpClass(cls):
super(NoVersionv040ImportTestCase, cls).setUpClass()
| {
"content_hash": "203d6bcc4884414127d8429397d52516",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 120,
"avg_line_length": 38.50874635568513,
"alnum_prop": 0.6695688382480978,
"repo_name": "mrpau/kolibri",
"id": "1004245db12184d74247e0bea9be353f8203dd6f",
"size": "26417",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/content/test/test_channel_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
} |
"""
Digital clock implemented in GTK2.
"""
# STDLIB IMPORTS
from math import pi
# THIRD PARTY IMPORTS
from gtk import main
from gtk.gdk import color_parse
import gtk
# LOCAL IMPORTS
from _BasePyClock import BasePyClock
# TUPLE CONTAING DICTIONARIES DETAILING WHICH SEGMENTS WILL NEED TO BE
# 'ON' TO DISPLAY A NUMBER.
SEGMENTS = (
# Number 0:
{"a": True, "b": True, "c": True, "d": True, "e": True, "f": True,
"g": False},
# Number 1:
{"a": False, "b": True, "c": True, "d": False, "e": False, "f": False,
"g": False},
# Number 2:
{"a": True, "b": True, "c": False, "d": True, "e": True, "f": False,
"g": True},
# Number 3:
{"a": True, "b": True, "c": True, "d": True, "e": False, "f": False,
"g": True},
# Number 4:
{"a": False, "b": True, "c": True, "d": False, "e": False, "f": True,
"g": True},
# Number 5:
{"a": True, "b": False, "c": True, "d": True, "e": False, "f": True,
"g": True},
# Number 6:
{"a": True, "b": False, "c": True, "d": True, "e": True, "f": True,
"g": True},
# Number 7:
{"a": True, "b": True, "c": True, "d": False, "e": False, "f": False,
"g": False},
# Number 8:
{"a": True, "b": True, "c": True, "d": True, "e": True, "f": True,
"g": True},
# Number 9:
{"a": True, "b": True, "c": True, "d": False, "e": False, "f": True,
"g": True})
class DigitalPyClock(BasePyClock):
"""
Class implementing the DigitalPyClock.
"""
_SEGMENT_ORIENTATION = {"a": 'h',
"b": 'v',
"c": 'v',
"d": 'h',
"e": 'v',
"f": 'v',
"g": 'h'}
# segement offsets for drawing the seven segment display.
_SEGMENT_OFFSETS = {"a": (5, 0),
"b": (60, 5),
"c": (60, 65),
"d": (5, 120),
"e": (0, 65),
"f": (0, 5),
"g": (5, 60)}
def __init__(self, led_colour="red"):
"""
Instantiate an instance of DigitalPyClock.
"""
super(DigitalPyClock, self).__init__(title="Digital PyClock",
init_width=640, init_height=160)
self._colour_obj = color_parse(spec=led_colour)
self._led_red = self._colour_obj.red / 65535.0
self._led_green = self._colour_obj.green / 65535.0
self._led_blue = self._colour_obj.blue / 65535.0
def _draw_clock(self):
"""
Draw the clock.
"""
self._draw_hour_segments()
self._draw_double_dots(x_pos=210, y_pos=50, y_pos2=110, radius=10)
self._draw_minute_segments()
self._draw_double_dots(x_pos=425, y_pos=50, y_pos2=110, radius=10)
self._draw_second_segments()
def _draw_hour_segments(self):
"""
Draw the segments to display the hour.
"""
tens = self._time.hour / 10
units = self._time.hour % 10
self._draw_seven_segment(number=tens, start_x=20, start_y=20)
self._draw_seven_segment(number=units, start_x=115, start_y=20)
def _draw_minute_segments(self):
"""
Draw the segments to display the minutes.
"""
tens = self._time.minute / 10
units = self._time.minute % 10
self._draw_seven_segment(number=tens, start_x=240, start_y=20)
self._draw_seven_segment(number=units, start_x=335, start_y=20)
def _draw_second_segments(self):
"""
Draw the segments to display the seconds.
"""
tens = self._time.second / 10
units = self._time.second % 10
self._draw_seven_segment(number=tens, start_x=460, start_y=20)
self._draw_seven_segment(number=units, start_x=555, start_y=20)
def _draw_seven_segment(self, number, start_x, start_y):
"""
Draw the seven segment display.
a
f b
g
e c
d
@type number: int
@param number: Number to display on the seven segment.
@type start_x: int
@param start_x: Point on the X axis to start drawing the
display from.
@type start_y: int
@param start_y: Point on the Y axis to start drawing the
display from.
"""
num_segment = SEGMENTS[number]
segments = num_segment.keys()
for segment in segments:
on = num_segment.get(segment, False)
offset = self._SEGMENT_OFFSETS.get(segment)
x_offset = offset[0]
y_offset = offset[1]
if self._SEGMENT_ORIENTATION.get(segment) == 'h':
self._draw_h_led(start_x=(start_x + x_offset),
start_y=(start_y + y_offset),
fill=on)
else:
self._draw_v_led(start_x=(start_x + x_offset),
start_y=(start_y + y_offset),
fill=on)
def _draw_h_led(self, start_x, start_y, fill=True):
"""
Draw a horizontal LED: <=>
@type start_x: int
@param start_x: Point on the X axis to start drawing the LED
from.
@type start_y: int
@param start_y: Point on the Y axis to start drawing the LED
from.
@type fill: bool
@param fill: If True, set the colour of the LED to the
colour_obj instance variable, else set the colour to black.
"""
points = ((start_x + 10, start_y + 10), (start_x + 40, start_y + 10),
(start_x + 50, start_y), (start_x + 40, start_y - 10),
(start_x + 10, start_y - 10), (start_x, start_y))
self._draw_segment(start_x=start_x, start_y=start_y, fill=fill,
points=points)
def _draw_v_led(self, start_x, start_y, fill=True):
"""
Draw a vertical LED:
/\
||
\/
@type start_x: int
@param start_x: Point on the X axis to start drawing the LED
from.
@type start_y: int
@param start_y: Point on the Y axis to start drawing the LED
from.
@type fill: bool
@param fill: If True, set the colour of the LED to the
colour_obj instance variable, else set the colour to black.
"""
points = ((start_x + 10, start_y + 10), (start_x + 10, start_y + 40),
(start_x, start_y + 50), (start_x - 10, start_y + 40),
(start_x - 10, start_y + 10), (start_x, start_y))
self._draw_segment(start_x=start_x, start_y=start_y, fill=fill,
points=points)
def _draw_segment(self, start_x, start_y, points, fill=True):
"""
Draw the LED.
@type start_x: int
@param start_x: Point on the X axis to start drawing the LED
from.
@type start_y: int
@param start_y: Point on the Y axis to start drawing the LED
from.
@type points: ( (int, int) )
@param points: Tuple containg tuples of X and Y coordinates
which will be used to draw the polygon.
@type fill: bool
@param fill: If True, set the colour of the LED to the
colour_obj instance variable, else set the colour to black.
"""
self._context.save()
# self._context.set_line_width(2.5 * self._context.get_line_width())
self._context.move_to(start_x, start_y)
for point in points:
self._context.line_to(point[0], point[1])
if fill:
self._context.set_source_rgb(self._led_red,
self._led_green,
self._led_blue)
else:
self._context.set_source_rgb(self._led_red * 0.4,
self._led_green * 0.4,
self._led_blue * 0.4)
# LED background
self._context.fill_preserve()
# LED outline.
self._context.set_source_rgb(0.0, 0.0, 0.0)
self._context.stroke()
self._context.restore()
def _draw_double_dots(self, x_pos, y_pos, y_pos2, radius):
"""
Draw the double dots (:) which separate the hour, minute and
seconds.
@type x_pos: int
@param x_pos: Center point along the X axis of the circle.
@type y_pos: int
@param y_pos: Center point along the Y axis to draw the first
circle.
@type y_pos2: int
@param y_pos2: Center point along the Y axis to draw the second
circle.
@type radius: int
@param radius: Radius of the dots to draw.
"""
self._context.save()
self._context.arc(x_pos, y_pos, radius, 0, 2 * pi)
self._context.set_source_rgb(self._led_red, self._led_green,
self._led_blue)
self._context.fill_preserve()
self._context.set_source_rgb(0.0, 0.0, 0.0)
self._context.stroke()
self._context.arc(x_pos, y_pos2, radius, 0, 2 * pi)
self._context.set_source_rgb(self._led_red, self._led_green,
self._led_blue)
self._context.fill_preserve()
self._context.set_source_rgb(0.0, 0.0, 0.0)
self._context.stroke()
self._context.restore()
if __name__ == "__main__":
DigitalPyClock()
main()
| {
"content_hash": "30e3bfbf762ee0446c310ceebe396ec5",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 77,
"avg_line_length": 31.50326797385621,
"alnum_prop": 0.4978215767634855,
"repo_name": "dummyload/PyClock",
"id": "c3cc141fba8f654b95d4c1233915c03b21eddcb5",
"size": "9659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digital.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24136"
}
],
"symlink_target": ""
} |
from pyflann import *
from copy import copy
from numpy import *
from numpy.random import *
import unittest
class Test_PyFLANN_nn(unittest.TestCase):
def setUp(self):
self.nn = FLANN()
class Test_PyFLANN_nn_index(unittest.TestCase):
def testnn_index_save_kdtree_1(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kdtree", trees=1)
def testnn_index_save_kdtree_4(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kdtree", trees=4)
def testnn_index_save_kdtree_10(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kdtree", trees=10)
def testnn_index_save_kmeans_2(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kmeans", branching=2, iterations=11)
def testnn_index_save_kmeans_16(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kmeans", branching=16, iterations=11)
def testnn_index_save_kmeans_32(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kmeans", branching=32, iterations=11)
def testnn_index_save_kmeans_64(self):
self.run_nn_index_save_perturbed(64,1000, algorithm="kmeans", branching=64, iterations=11)
def testnn__save_kdtree_1(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kdtree", trees=1, checks=128)
def testnn__save_kdtree_4(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kdtree", trees=4, checks=128)
def testnn__save_kdtree_10(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kdtree", trees=10, checks=128)
def testnn__save_kmeans_2(self):
self.run_nn_index_save_rand(64,1000,1000, algorithm="kmeans", branching=2, iterations=11, checks=64)
def testnn__save_kmeans_8(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kmeans", branching=8, iterations=11, checks=32)
def testnn__save_kmeans_16(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kmeans", branching=16, iterations=11, checks=40)
def testnn__save_kmeans_32(self):
self.run_nn_index_save_rand(64,10000,1000, algorithm="kmeans", branching=32, iterations=11, checks=56)
def run_nn_index_save_perturbed(self, dim, N, **kwargs):
x = rand(N, dim)
nn = FLANN()
nn.build_index(x, **kwargs)
nn.save_index("index.dat")
        nn.delete_index()
nn = FLANN()
nn.load_index("index.dat",x)
x_query = x + randn(x.shape[0], x.shape[1])*0.0001/dim
nnidx, nndist = nn.nn_index(x_query)
correct = all(nnidx == arange(N, dtype = index_type))
nn.delete_index()
self.assert_(correct)
def run_nn_index_save_rand(self, dim, N, Nq, **kwargs):
x = rand(N, dim)
x_query = rand(Nq,dim)
# build index, search and delete it
nn = FLANN()
nn.build_index(x, **kwargs)
nnidx, nndist = nn.nn_index(x_query, checks=kwargs["checks"])
nn.save_index("index.dat")
del nn
# now reload index and search again
nn = FLANN()
nn.load_index("index.dat",x)
nnidx2, nndist2 = nn.nn_index(x_query, checks=kwargs["checks"])
del nn
correct = all(nnidx == nnidx2)
self.assert_(correct)
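# Hedged condensed example (added): the save/load round trip the tests above
# exercise, outside of unittest. The file name and array sizes are arbitrary;
# FLANN and rand come from the star imports at the top of this file.
def example_index_roundtrip():
    data = rand(1000, 64)
    flann = FLANN()
    flann.build_index(data, algorithm="kdtree", trees=4)
    flann.save_index("index.dat")
    flann.delete_index()
    flann = FLANN()
    flann.load_index("index.dat", data)
    return flann.nn_index(data[:5], checks=128)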
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "50824ef4fff6171e84e562fda013c7ee",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 110,
"avg_line_length": 32.40384615384615,
"alnum_prop": 0.627893175074184,
"repo_name": "piskvorky/flann",
"id": "9e15df009eb9c248a55877984d93d1280bc7c772",
"size": "3393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_index_save.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import plugins
from messenger import Messenger
| {
"content_hash": "740e8e0e16859cdbbdf7a2a14ce89e37",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 23.5,
"alnum_prop": 0.8723404255319149,
"repo_name": "beck/clean-py-app",
"id": "d79f266e326bfe3bf544be001c8b68daab3e2424",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "520"
}
],
"symlink_target": ""
} |
from django.template import Library, Node, Template, TemplateSyntaxError, \
Variable
from django.utils.translation import ugettext as _
from userprofile.models import Avatar, AVATAR_SIZES
from django.contrib.auth.models import User
from django.conf import settings
try:
from PIL import Image
except ImportError:
import Image
# from PythonMagick import Image
#from utils.TuxieMagick import Image
import os
import urlparse
import time
register = Library()
if hasattr(settings, "DEFAULT_AVATAR") and settings.DEFAULT_AVATAR:
DEFAULT_AVATAR = settings.DEFAULT_AVATAR
else:
DEFAULT_AVATAR = os.path.join(settings.MEDIA_ROOT, "userprofile", "generic.jpg")
class ResizedThumbnailNode(Node):
def __init__(self, size, username=None):
try:
self.size = int(size)
except:
self.size = Variable(size)
if username:
self.user = Variable(username)
else:
self.user = Variable("user")
def render(self, context):
# If size is not an int, then it's a Variable, so try to resolve it.
if not isinstance(self.size, int):
self.size = int(self.size.resolve(context))
if not self.size in AVATAR_SIZES:
return ''
try:
user = self.user.resolve(context)
avatar = Avatar.objects.get(user=user, valid=True).image
avatar_path = avatar.path
if not os.path.isfile(avatar_path):
                raise IOError("missing avatar file: %s" % avatar_path)
base, filename = os.path.split(avatar_path)
name, extension = os.path.splitext(filename)
filename = os.path.join(base, "%s.%s%s" % (name, self.size, extension))
url_tuple = urlparse.urlparse(avatar.url)
url = urlparse.urljoin(url_tuple[2], "%s.%s%s" % (name, self.size, extension))
except:
avatar_path = DEFAULT_AVATAR
base, filename = os.path.split(avatar_path)
generic, extension = os.path.splitext(filename)
filename = os.path.join(base, "%s.%s%s" % (generic, self.size, extension))
url = filename.replace(settings.MEDIA_ROOT, '')
if not os.path.isfile(filename):
image = Image.open(avatar_path)
image.thumbnail((self.size, self.size), Image.ANTIALIAS)
image.save(filename, "JPEG")
return url
@register.tag('avatar')
def Thumbnail(parser, token):
bits = token.contents.split()
username = None
if len(bits) > 3:
raise TemplateSyntaxError, _(u"You have to provide only the size as \
an integer (both sides will be equal) and optionally, the \
username.")
elif len(bits) == 3:
username = bits[2]
elif len(bits) < 2:
bits.append("96")
return ResizedThumbnailNode(bits[1], username)
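# Hedged usage note (added): in a template this tag would be loaded and used
# roughly as follows; the size must be one of AVATAR_SIZES for a URL to be
# returned.
#
#     {% load avatars %}
#     <img src="{% avatar 96 user.username %}" />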
| {
"content_hash": "36cf64947c44c5636ded9de458e1f41e",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 90,
"avg_line_length": 34.69512195121951,
"alnum_prop": 0.6165202108963093,
"repo_name": "jazzido/django-profile",
"id": "2ac437d34997afc4daef986871f52674772b10c6",
"size": "2860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/templatetags/avatars.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "75148"
},
{
"name": "Python",
"bytes": "71307"
}
],
"symlink_target": ""
} |
"""
.. module: lemur.users.models
:platform: unix
:synopsis: This module contains all of the models need to create a user within
lemur
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, String, Column, Boolean
from sqlalchemy.event import listen
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.database import db
from lemur.models import roles_users
from lemur.extensions import bcrypt
def hash_password(mapper, connect, target):
"""
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
"""
target.hash_password()
class User(db.Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
password = Column(String(128))
active = Column(Boolean())
confirmed_at = Column(ArrowType())
username = Column(String(255), nullable=False, unique=True)
email = Column(String(128), unique=True)
profile_picture = Column(String(255))
roles = relationship('Role', secondary=roles_users, passive_deletes=True, backref=db.backref('user'), lazy='dynamic')
certificates = relationship('Certificate', backref=db.backref('user'), lazy='dynamic')
authorities = relationship('Authority', backref=db.backref('user'), lazy='dynamic')
logs = relationship('Log', backref=db.backref('user'), lazy='dynamic')
def check_password(self, password):
"""
Hash a given password and check it against the stored value
        to determine its validity.
:param password:
:return:
"""
if self.password:
return bcrypt.check_password_hash(self.password, password)
def hash_password(self):
"""
Generate the secure hash for the password.
:return:
"""
if self.password:
self.password = bcrypt.generate_password_hash(self.password).decode('utf-8')
@property
def is_admin(self):
"""
Determine if the current user has the 'admin' role associated
with it.
:return:
"""
for role in self.roles:
if role.name == 'admin':
return True
def __repr__(self):
return "User(username={username})".format(username=self.username)
listen(User, 'before_insert', hash_password)
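# Hedged usage sketch (added; the field values are invented): because of the
# 'before_insert' listener above, a plaintext password assigned at
# construction time is hashed automatically when the session flushes, and
# verification then goes through check_password().
def _example_password_roundtrip(session):
    user = User(username="example", email="[email protected]", password="s3cret")
    session.add(user)
    session.flush()  # hash_password() fires here, before the INSERT
    assert user.check_password("s3cret")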
| {
"content_hash": "7b8eb1d1fc34b7c22843e20e07a1b913",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 121,
"avg_line_length": 29.929411764705883,
"alnum_prop": 0.6560534591194969,
"repo_name": "nevins-b/lemur",
"id": "a92753e4c525ea2b2e68cb6e313275c1ede7d3f5",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/users/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2688"
},
{
"name": "HTML",
"bytes": "181370"
},
{
"name": "JavaScript",
"bytes": "13785"
},
{
"name": "Makefile",
"bytes": "2581"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "610910"
}
],
"symlink_target": ""
} |
import os
import unittest
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry import value
from telemetry.value import none_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet(base_dir=os.path.dirname(__file__))
ps.AddStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
ps.AddStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir))
ps.AddStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
self.page_set = ps
@property
def pages(self):
return self.page_set.pages
class ValueTest(TestBase):
def testBuildbotValueType(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True)
self.assertEquals('default', v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertEquals([3], v.GetBuildbotValue())
self.assertEquals(('x', page0.display_name),
v.GetChartAndTraceNameForPerPageResult())
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False)
self.assertEquals(
'unimportant',
v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
def testScalarSamePageMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
v1 = scalar.ScalarValue(page0, 'x', 'unit', 2)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
def testScalarDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
v1 = scalar.ScalarValue(page1, 'x', 'unit', 2)
vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
def testScalarWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n')
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(None, vM.values)
self.assertEquals(none_values.MERGE_FAILURE_REASON,
vM.none_value_reason)
def testScalarWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: scalar.ScalarValue(page0, 'x', 'unit', None))
def testScalarWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: scalar.ScalarValue(page0, 'x', 'unit', 1,
none_value_reason='n'))
def testAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'value': 42
})
def testNoneValueAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
none_value_reason='n')
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'value': None,
'none_value_reason': 'n'
})
def testFromDictInt(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42)
def testFromDictFloat(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42.4
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42.4)
def testFromDictNoneValue(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': None,
'none_value_reason': 'n'
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, None)
self.assertEquals(v.none_value_reason, 'n')
| {
"content_hash": "072f3936c670da0a236bc8af2944e86c",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 31.524475524475523,
"alnum_prop": 0.6404170363797693,
"repo_name": "SaschaMester/delicium",
"id": "8bd7a53661ba8c101b6d83cd14b3ad68567846f4",
"size": "4670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/value/scalar_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4171711"
},
{
"name": "C++",
"bytes": "243066171"
},
{
"name": "CSS",
"bytes": "935112"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27211018"
},
{
"name": "Java",
"bytes": "14285999"
},
{
"name": "JavaScript",
"bytes": "20413885"
},
{
"name": "Makefile",
"bytes": "23496"
},
{
"name": "Objective-C",
"bytes": "1725804"
},
{
"name": "Objective-C++",
"bytes": "9880229"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "478406"
},
{
"name": "Python",
"bytes": "8261413"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import io
from builtins import *
from lxml import etree
from lxml import objectify
import json
import sys
sys.path.append('..')
import pyreqif.pyreqif
from pprint import pprint
import re
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent + 1)
else:
print('\t' * (indent + 1) + str(value))
transLationTable = {"IDENTIFIER": "identifier",
"COUNTRY-CODE": "countryCode",
"CREATION-TIME": "creationTime",
"TITLE": "title",
"COMMENT": "comment",
"AUTHOR": "author",
"LONG-NAME": "longName",
"VERSION": "version",
"SOURCE-TOOL-ID": "sourceToolId",
"LAST-CHANGE": "lastChange",
"EMBEDDED": "embedded",
"TYPE": "type",
"VALUES": "values",
"CONTENT-REF": "contentref",
"CONTENT": "content",
"DESC": "desc"}
mapReqifAttributeValue = {"default": "embeddedDoc",
"ATTRIBUTE-VALUE-EMBEDDED-DOCUMENT": "embeddedDoc",
"ATTRIBUTE-VALUE-STRING": "string",
"ATTRIBUTE-VALUE-XHTML": "embeddedDoc",
"ATTRIBUTE-VALUE-BOOLEAN": "embeddedDoc",
"ATTRIBUTE-VALUE-INTEGER": "embeddedDoc"}
mapReqifAttributeDefinition = {"default": "complex",
"ATTRIBUTE-DEFINITION-COMPLEX": "complex",
"ATTRIBUTE-DEFINITION-STRING": "string",
"ATTRIBUTE-DEFINITION-XHTML": "complex",
"ATTRIBUTE-DEFINITION-BOOLEAN": "complex",
"ATTRIBUTE-DEFINITION-INTEGER": "complex"}
mapReqifDatatypeDefinition = {"default": "document",
"DATATYPE-DEFINITION-DOCUMENT": "document",
"DATATYPE-DEFINITION-STRING": "string",
"DATATYPE-DEFINITION-XHTML": "document",
"DATATYPE-DEFINITION-BOOLEAN": "document",
"DATATYPE-DEFINITION-INTEGER": "document"}
transLationTableReverse = dict(map(reversed, transLationTable.items()))
mapReqifAttributeValueReversed = dict(map(reversed, mapReqifAttributeValue.items()))
mapReqifAttributeDefinitionReversed = dict(map(reversed, mapReqifAttributeDefinition.items()))
mapReqifDatatypeDefinitionReversed = dict(map(reversed, mapReqifDatatypeDefinition.items()))
def mapReqifAttributeValue2Py(elem: str):
if elem in mapReqifAttributeValue:
return mapReqifAttributeValue[elem]
else:
print("Not supported datatype: ")
print(elem)
return mapReqifAttributeValue['default']
def mapPy2ReqifAttributeValue(elem: str):
if elem in mapReqifAttributeValueReversed:
return mapReqifAttributeValueReversed[elem]
else:
print("Not supported datatype: ")
print(elem)
return mapReqifAttributeValueReversed['default']
def mapReqifAttributeDefinition2Py(elem: str):
if elem in mapReqifAttributeDefinition:
return mapReqifAttributeDefinition[elem]
else:
print("Not supported attribute definition: ")
print(elem)
return mapReqifAttributeDefinition['default']
def mapPy2ReqifAttributeDefinition(elem: str):
if elem in mapReqifAttributeDefinitionReversed:
return mapReqifAttributeDefinitionReversed[elem]
else:
print("Not supported attribute definition: ")
print(elem)
return mapReqifAttributeDefinitionReversed['default']
def mapReqifDatatypeDefinition2Py(elem: str):
if elem in mapReqifDatatypeDefinition:
return mapReqifDatatypeDefinition[elem]
else:
print("Not supported datatype definition: ")
print(elem)
return mapReqifDatatypeDefinition['default']
def mapPy2ReqifDatatypeDefinition(elem: str):
if elem in mapReqifDatatypeDefinitionReversed:
return mapReqifDatatypeDefinitionReversed[elem]
else:
print("Not supported datatype datatype: ")
print(elem)
return mapReqifDatatypeDefinitionReversed['default']
def py2reqif(myDict):
MyNewDict = {}
for pyname in myDict:
if pyname in transLationTableReverse:
reqifname = transLationTableReverse[pyname]
MyNewDict[reqifname] = myDict[pyname]
else:
MyNewDict[pyname] = myDict[pyname]
return MyNewDict
def reqif2py(myDict):
MyNewDict = {}
for reqifname in myDict:
if reqifname in transLationTable:
pyname = transLationTable[reqifname]
MyNewDict[pyname] = myDict[reqifname]
else:
MyNewDict[reqifname] = myDict[reqifname]
return MyNewDict
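# Illustrative round trip (added sketch, not in the original module): the two
# helpers are inverse key translations, so chaining them returns the input.
#
#     >>> reqif2py({"LONG-NAME": "Req 1", "IDENTIFIER": "id-1"})
#     {'longName': 'Req 1', 'identifier': 'id-1'}
#     >>> py2reqif(reqif2py({"LONG-NAME": "Req 1"}))
#     {'LONG-NAME': 'Req 1'}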
def load(f):
inputType = "RIF"
doc = pyreqif.pyreqif.doc()
tree = etree.parse(f)
root = tree.getroot()
ns = "{" + tree.xpath('namespace-uri(.)') + "}"
nsp = tree.xpath('namespace-uri(.)')
def getSubElementValuesByTitle(xmlElement, tagNameArray=[]):
defaultsSubElements = ['IDENTIFIER', 'LAST-CHANGE', 'LONG-NAME']
# ALTERNATIVE-ID ?
tagNameArray = list(set(defaultsSubElements + tagNameArray))
returnDict = {}
for tag in tagNameArray:
if tag in xmlElement.attrib:
returnDict[tag] = xmlElement.attrib[tag]
else:
temp = xmlElement.find('./' + ns + tag)
if temp is not None:
returnDict[tag] = temp.text
return returnDict
if root.tag == ns + "REQ-IF":
inputType = "REQIF"
headerRoot = root.find('./' + ns + 'THE-HEADER/' + ns + 'REQ-IF-HEADER')
contentRoot = root.find('./' + ns + 'CORE-CONTENT/' + ns + 'REQ-IF-CONTENT')
else:
headerRoot = root
contentRoot = root
headerTags = getSubElementValuesByTitle(headerRoot,
['AUTHOR', 'COMMENT', 'COUNTRY-CODE', 'CREATION-TIME', 'SOURCE-TOOL-ID',
'TITLE', 'VERSION'])
# header missing:
# COMMENT, REPOSITORY-ID, REQ-IF-TOOL-ID, REQ-IF-VERSION
doc.addHeader(reqif2py(headerTags))
datatypesXmlElement = contentRoot.find('./' + ns + 'DATATYPES')
for child in datatypesXmlElement:
if child.tag == ns + "DATATYPE-DEFINITION-DOCUMENT" or child.tag == ns + 'DATATYPE-DEFINITION-STRING' or child.tag == ns + 'DATATYPE-DEFINITION-XHTML' \
or child.tag == ns + 'DATATYPE-DEFINITION-BOOLEAN' or child.tag == ns + "DATATYPE-DEFINITION-INTEGER":
datatypeProto = getSubElementValuesByTitle(child, ['EMBEDDED'])
            tagWithoutNamespace = re.sub(r'{[\S]*}', '', child.tag)
datatypeProto['type'] = mapReqifDatatypeDefinition2Py(tagWithoutNamespace)
doc.addDatatype(reqif2py(datatypeProto))
elif child.tag == ns + "DATATYPE-DEFINITION-ENUMERATION":
datatypeProto = getSubElementValuesByTitle(child, ['EMBEDDED'])
datatypeProto['type'] = "enum"
specifiedValues = child.find('./' + ns + "SPECIFIED-VALUES")
values = {}
for valElement in specifiedValues:
tempDict = getSubElementValuesByTitle(valElement)
properties = valElement.find('./' + ns + "PROPERTIES")
embeddedValue = properties.find('./' + ns + "EMBEDDED-VALUE")
tempDict['properites'] = reqif2py(getSubElementValuesByTitle(embeddedValue, ['KEY', 'OTHER-CONTENT']))
tempDict = reqif2py(tempDict)
values[tempDict['identifier']] = tempDict
datatypeProto['values'] = values
doc.addDatatype(reqif2py(datatypeProto))
else:
# missing:
# DATATYPE-DEFINITION-BOOLEAN
# DATATYPE-DEFINITION-DATE
# DATATYPE-DEFINITION-INTEGER
# DATATYPE-DEFINITION-REAL
print("Not supported datatype: ", )
print(child.tag)
specTypesXmlElement = contentRoot.find('./' + ns + 'SPEC-TYPES')
for child in specTypesXmlElement:
if child.tag == ns + "SPEC-TYPE" or child.tag == ns + "SPEC-OBJECT-TYPE":
specType = getSubElementValuesByTitle(child, ['DESC'])
# specType = getSubElementValuesByTitle(child)
attributesXml = child.find('./' + ns + "SPEC-ATTRIBUTES")
if attributesXml is not None:
for attribute in attributesXml:
if attribute.tag == ns + "ATTRIBUTE-DEFINITION-COMPLEX" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-STRING" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-XHTML" \
or attribute.tag == ns + "ATTRIBUTE-DEFINITION-BOOLEAN" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-INTEGER":
specAttribType = getSubElementValuesByTitle(attribute)
                        tagWithoutNamespace = re.sub(r'{[\S]*}', '', attribute.tag)
specAttribType["type"] = mapReqifAttributeDefinition2Py(tagWithoutNamespace)
typeTag = attribute.find('./' + ns + 'TYPE')
if typeTag is not None:
                            reference = typeTag.getchildren()[0]
                            # reference = typeTag.find('./' + ns + 'DATATYPE-DEFINITION-DOCUMENT-REF')
if doc.datatypeById(reference.text):
specAttribType['typeRef'] = reference.text
else:
print("BEEP unknown Datatype")
elif attribute.tag == ns + "ATTRIBUTE-DEFINITION-ENUMERATION":
specAttribType = getSubElementValuesByTitle(attribute)
specAttribType["type"] = "enum"
typeRef = attribute.find('./' + ns + 'TYPE/' + ns + 'DATATYPE-DEFINITION-ENUMERATION-REF')
if typeRef is not None:
specAttribType['typeRef'] = typeRef.text
defaultValue = attribute.find(
'./' + ns + 'DEFAULT-VALUE/' + ns + 'ATTRIBUTE-VALUE-ENUMERATION/' + ns + 'VALUES/' + ns + 'ENUM-VALUE-REF')
if defaultValue is not None:
specAttribType['defaultValue'] = defaultValue.text
else:
print("Not supported Attribute: ", )
print(attribute.tag)
specAttribType = reqif2py(specAttribType)
specType[specAttribType['identifier']] = specAttribType
# specType[specAttribType['identifier']].pop('identifier')
doc.addRequirementType(reqif2py(specType))
def remove_namespaces(thedoc):
# http://wiki.tei-c.org/index.php/Remove-Namespaces.xsl
xslt = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
'''
# xslt_doc = etree.parse(io.BytesIO(xslt))
xslt_doc = etree.parse(io.BytesIO(bytes(xslt, "utf8")))
transform = etree.XSLT(xslt_doc)
ret = transform(thedoc)
return ret
specObjectsXmlElement = contentRoot.find('./' + ns + 'SPEC-OBJECTS')
for requirementXml in specObjectsXmlElement:
requirement = None
if requirementXml.tag == ns + "SPEC-OBJECT":
requirement = getSubElementValuesByTitle(requirementXml)
typeRefXml = requirementXml.find('./' + ns + 'TYPE/' + ns + 'SPEC-TYPE-REF')
if typeRefXml is None:
typeRefXml = requirementXml.find('./' + ns + 'TYPE/' + ns + 'SPEC-OBJECT-TYPE-REF')
if typeRefXml is not None:
requirement["typeRef"] = typeRefXml.text
valuesXml = requirementXml.find('./' + ns + 'VALUES')
if valuesXml is None:
valuesXml = []
values = {}
for valueXml in valuesXml:
value = getSubElementValuesByTitle(valueXml)
# TODO : Support other types
if valueXml.tag == ns + 'ATTRIBUTE-VALUE-EMBEDDED-DOCUMENT' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-STRING' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-XHTML' \
or valueXml.tag == ns + 'ATTRIBUTE-VALUE-BOOLEAN' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-INTEGER':
attributeRefXml = valueXml.find('./' + ns + 'DEFINITION').getchildren()[0]
value['attributeRef'] = attributeRefXml.text
if 'THE-VALUE' in valueXml.attrib:
value["content"] = valueXml.attrib['THE-VALUE']
else:
contentXml = valueXml.find(
'./' + ns + 'XHTML-CONTENT/{http://automotive-his.de/200706/rif-xhtml}div')
if contentXml is None:
contentXml = valueXml.find("./" + ns + 'THE-VALUE/{http://www.w3.org/1999/xhtml}div')
value["content"] = etree.tostring(remove_namespaces(contentXml))
# value["content"] = "".join(contentXml.itertext())
                tagWithoutNamespace = re.sub(r'{[\S]*}', '', valueXml.tag)
value["type"] = mapReqifAttributeValue2Py(tagWithoutNamespace)
elif valueXml.tag == ns + 'ATTRIBUTE-VALUE-ENUMERATION':
value["type"] = "enum"
attributeRefXml = valueXml.find(
'./' + ns + 'DEFINITION/' + ns + 'ATTRIBUTE-DEFINITION-ENUMERATION-REF')
value['attributeRef'] = attributeRefXml.text
contentXml = valueXml.findall('./' + ns + 'VALUES/' + ns + 'ENUM-VALUE-REF')
if contentXml is not None:
value["contentRef"] = []
for content in contentXml:
value["contentRef"].append(content.text)
else:
value["contentRef"] = None
else:
print("valueType not supported yet:", )
print(valueXml.tag[len(ns):])
values[value['attributeRef']] = reqif2py(value)
requirement["values"] = values
else:
print("Unknown spec object tag:", )
print(requirementXml.tag)
        if requirement is not None:
doc.addRequirement(reqif2py(requirement))
specGroupsXml = contentRoot.find('./' + ns + 'SPEC-GROUPS')
if specGroupsXml is not None:
for specGroupXml in specGroupsXml:
if specGroupXml.tag == ns + "SPEC-GROUP":
specification = getSubElementValuesByTitle(specGroupXml, ['DESC'])
spec = pyreqif.pyreqif.specification(**reqif2py(specification))
specObjectsXml = specGroupXml.find('./' + ns + 'SPEC-OBJECTS')
for specObjectRef in specObjectsXml:
spec.addReq(specObjectRef.text)
doc.addSpecification(spec)
def getHierarchy(hierarchyEle, inputType):
hierarchyDict = getSubElementValuesByTitle(hierarchyEle)
typeRef = hierarchyEle.find('./' + ns + 'TYPE/' + ns + 'SPEC-TYPE-REF')
if typeRef is not None:
hierarchyDict["typeRef"] = typeRef.text
objectRef = hierarchyEle.find('./' + ns + 'OBJECT/' + ns + 'SPEC-OBJECT-REF')
if objectRef is not None:
hierarchyDict["objectRef"] = objectRef.text
hierarchy = pyreqif.pyreqif.hierarchy(**reqif2py(hierarchyDict))
children = hierarchyEle.find('./' + ns + 'CHILDREN')
if children is not None:
for child in children:
hierarchy.addChild(getHierarchy(child, inputType))
return hierarchy
if inputType == "RIF":
hierarchyRoots = contentRoot.find('./' + ns + 'SPEC-HIERARCHY-ROOTS')
elif inputType == "REQIF":
hierarchyRoots = contentRoot.find('./' + ns + 'SPECIFICATIONS')
for hierarchyRoot in hierarchyRoots:
doc.hierarchy.append(getHierarchy(hierarchyRoot, inputType))
# SPEC-HIERARCHY
relations = {}
specRelsXml = contentRoot.find('./' + ns + 'SPEC-RELATIONS')
if specRelsXml is not None:
for specRelXml in specRelsXml:
if specRelXml.tag == ns + "SPEC-RELATION":
relation = getSubElementValuesByTitle(specRelXml)
typeRef = specRelXml.find('./' + ns + 'TYPE')
if typeRef is not None:
relation["typeRef"] = typeRef.getchildren()[0].text
sourceRef = specRelXml.find('./' + ns + 'SOURCE/' + ns + 'SPEC-OBJECT-REF')
if sourceRef is not None:
relation["sourceRef"] = sourceRef.text
targetRef = specRelXml.find('./' + ns + 'TARGET/' + ns + 'SPEC-OBJECT-REF')
if targetRef is not None:
relation["targetRef"] = targetRef.text
doc.addRelation(reqif2py(relation))
return doc
attributesForElements = ["IDENTIFIER", "LAST-CHANGE", "LONG-NAME", "MAX-LENGTH", "MAX", "MIN", "ACCURACY",
"OTHER-CONTENT", "KEY", "MULTI-VALUED"]
notUsedAttributes = ["COUNTRY-CODE", "EMBEDDED", "AUTHOR", "VERSION", "DESC", "contentRef"]
def createSubElements(parent, myDict):
for key in myDict:
if key in attributesForElements or key in notUsedAttributes:
continue
sn = etree.SubElement(parent, key)
if myDict[key] is not None:
sn.text = myDict[key]
else:
sn.text = 'None'
def createSubElement(parent, tag, text=None, attributes=None):
sn = etree.SubElement(parent, tag)
if text is not None:
sn.text = text
if attributes is not None:
for attributeName in attributesForElements:
if attributeName in attributes and attributes[
attributeName] is not None and attributeName not in notUsedAttributes:
sn.attrib[attributeName] = attributes[attributeName]
return sn
def dump(doc, f):
xsi = 'http://www.w3.org/2001/XMLSchema-instance'
arVersion = "1"
root = etree.Element(
'REQ-IF',
nsmap={
None: 'http://www.omg.org/spec/ReqIF/20110401/reqif.xsd',
'xhtml': "http://www.w3.org/1999/xhtml",
'id': "http://pror.org/presentation/id",
"configuration": "http://eclipse.org/rmf/pror/toolextensions/1.0",
})
#
# HEADER
#
theheader = createSubElement(root, "THE-HEADER")
headerXML = createSubElement(theheader, "REQ-IF-HEADER", attributes=py2reqif(doc.header))
tempDict = py2reqif(doc.header)
tempDict["REQ-IF-TOOL-ID"] = tempDict["SOURCE-TOOL-ID"]
tempDict["REQ-IF-VERSION"] = "1.0"
tempDict["SOURCE-TOOL-ID"] = "pyreqif"
for tagName in ["COMMENT", "CREATION-TIME", "REQ-IF-TOOL-ID", "REQ-IF-VERSION", "SOURCE-TOOL-ID", "TITLE"]:
createSubElement(headerXML, tagName, tempDict[tagName])
coreContent = createSubElement(root, "CORE-CONTENT")
reqIfContent = createSubElement(coreContent, "REQ-IF-CONTENT")
#
# DATATYPES
#
datatypesXml = createSubElement(reqIfContent, "DATATYPES")
for datatype in doc.datatypeList:
if datatype.mytype == "document":
myDict = py2reqif(datatype.toDict())
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-XHTML", attributes=myDict)
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
if datatype.mytype == "string":
myDict = py2reqif(datatype.toDict())
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-STRING", attributes=myDict)
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
if datatype.mytype == "enum":
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-ENUMERATION",
attributes=py2reqif(datatype.toDict()))
myDict = py2reqif(datatype.toDict())
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
specifiedValuesXml = createSubElement(datatypeXml, "SPECIFIED-VALUES")
for value, label in datatype.valueTable.items():
valuesXml = createSubElement(specifiedValuesXml, "ENUM-VALUE", attributes=py2reqif(label))
# createSubElement(valuesXml, "IDENTIFIER", value)
for element, content in py2reqif(label).items():
if element == "properites":
props = createSubElement(valuesXml, "PROPERTIES")
createSubElement(props, "EMBEDDED-VALUE", attributes=py2reqif(content))
elif element not in attributesForElements:
createSubElement(valuesXml, element, content)
#
# SPEC-TYPES
#
specTypes = createSubElement(reqIfContent, "SPEC-TYPES")
for reqType in doc.requirementTypeList:
specType = createSubElement(specTypes, "SPEC-OBJECT-TYPE", attributes=py2reqif(reqType.toDict()))
createSubElements(specType, py2reqif(reqType.toDict()))
if len(reqType.myTypes) > 0:
attributesXml = createSubElement(specType, "SPEC-ATTRIBUTES")
for mytype, ref in reqType.myTypes.items():
attribDict = py2reqif(ref.toDict())
if "TYPE" in attribDict and attribDict["TYPE"] == "enum":
attribDict.pop("TYPE")
attribDict["MULTI-VALUED"] = "false"
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-ENUMERATION", attributes=attribDict)
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-ENUMERATION-REF", label)
elif value not in attributesForElements:
createSubElement(enumXml, value, label)
if "TYPE" in attribDict and attribDict["TYPE"] == "complex":
# attribDict.pop("TYPE")
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-XHTML", attributes=attribDict)
attribDict.pop("TYPE")
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-XHTML-REF", label)
elif value not in attributesForElements and value not in notUsedAttributes:
createSubElement(enumXml, value, label)
if "TYPE" in attribDict and attribDict["TYPE"] == "string":
# attribDict.pop("TYPE")
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-STRING", attributes=attribDict)
attribDict.pop("TYPE")
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-STRING-REF", label)
elif value not in attributesForElements and value not in notUsedAttributes:
createSubElement(enumXml, value, label)
#
# SPEC-OBJECTS
#
specsXml = createSubElement(reqIfContent, "SPEC-OBJECTS")
for req in doc.requirementList:
specXml = createSubElement(specsXml, "SPEC-OBJECT", attributes=py2reqif(req.toDict()))
requirementDict = py2reqif(req.toDict())
for value, label in requirementDict.items():
if value == "VALUES":
valuesXml = createSubElement(specXml, "VALUES")
for value in label:
tempDict = py2reqif(value.toDict())
if "LONG-NAME" in tempDict:
tempDict.pop("LONG-NAME")
if "LAST-CHANGE" in tempDict:
tempDict.pop("LAST-CHANGE")
if "IDENTIFIER" in tempDict:
tempDict.pop("IDENTIFIER")
if value.mytype == "enum":
valueXml = createSubElement(valuesXml, "ATTRIBUTE-VALUE-ENUMERATION", attributes=tempDict)
valuesValuesXml = createSubElement(valueXml, "VALUES")
valuesDefinitionsXml = createSubElement(valueXml, "DEFINITION")
else:
valueXml = createSubElement(valuesXml, "ATTRIBUTE-VALUE-XHTML", attributes=tempDict)
valuesDefinitionsXml = createSubElement(valueXml, "DEFINITION")
for val, lab in py2reqif(value.toDict()).items():
if val == "contentRef" and lab is not None:
createSubElement(valuesValuesXml, "ENUM-VALUE-REF", lab[0])
elif val == "attributeRef":
if value.mytype == "enum":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-ENUMERATION-REF", lab)
elif value.mytype == "embeddedDoc":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-XHTML-REF", lab)
elif value.mytype == "string":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-STRING-REF", lab)
else:
print("Unknown Type " + value.mytype)
elif val == "TYPE":
pass
elif val == "CONTENT":
if lab is not None:
if '<' in str(lab):
labtree = etree.parse(io.BytesIO(lab))
labroot = labtree.getroot()
for el in labroot.iter():
el.tag = '{http://www.w3.org/1999/xhtml}' + el.tag
contentXml = createSubElement(valueXml, "THE-VALUE")
contentXml.append(labroot)
else:
createSubElement(valueXml, "THE-VALUE", lab)
elif val not in attributesForElements and val not in notUsedAttributes:
createSubElement(valueXml, val, lab)
elif value == "typeRef":
typeXml = createSubElement(specXml, "TYPE")
createSubElement(typeXml, "SPEC-OBJECT-TYPE-REF", label)
elif value not in attributesForElements:
createSubElement(specXml, value, label)
#
# SPEC-RELATIONS
#
specsRelXml = createSubElement(reqIfContent, "SPEC-RELATIONS")
for relation in doc.relations:
specsRel = createSubElement(specsRelXml, "SPEC-RELATION")
for value, label in py2reqif(relation).items():
if value == "typeRef":
typeXml = createSubElement(specsRel, "TYPE")
createSubElement(typeXml, "SPEC-TYPE-REF", label)
elif value == "sourceRef":
sourceXml = createSubElement(specsRel, "SOURCE")
createSubElement(sourceXml, "SPEC-OBJECT-REF", label)
elif value == "targetRef":
targetXml = createSubElement(specsRel, "TARGET")
createSubElement(targetXml, "SPEC-OBJECT-REF", label)
else:
createSubElement(specsRel, value, label)
#
# SPEC-GROUPS
#
# specGroupsXml = createSubElement(reqIfContent, "SPEC-GROUPS")
# for specification in doc.specificationList:
# specGroupXml = createSubElement(specGroupsXml, "SPEC-GROUP")
# for value,label in py2reqif(specification.toDict()).iteritems():
# createSubElement(specGroupXml,value,label)
# specObjectsXml = createSubElement(specGroupXml,"SPEC-OBJECTS")
# for req in specification:
# createSubElement(specObjectsXml ,"SPEC-OBJECT-REF", req)
#
# SPEC-HIERARCHY-ROOTS
#
def createChildHirachy(parentXmlTag, childObject):
childrenXml = createSubElement(parentXmlTag, "CHILDREN")
hierarchXml = createSubElement(childrenXml, "SPEC-HIERARCHY", attributes=py2reqif(childObject.toDict()))
for value, label in py2reqif(childObject.toDict()).items():
if value == "objectRef":
objectXml = createSubElement(hierarchXml, "OBJECT")
createSubElement(objectXml, "SPEC-OBJECT-REF", label)
elif value not in attributesForElements:
if label is not None:
createSubElement(hierarchXml, value, label)
for child in childObject.children:
createChildHirachy(hierarchXml, child)
specHierarchRootsXml = createSubElement(reqIfContent, "SPECIFICATIONS")
# SPEC-HIERARCHY-ROOT
for hierarch in doc.hierarchy:
specHierarchRootXml = createSubElement(specHierarchRootsXml, "SPECIFICATION",
attributes=py2reqif(hierarch.toDict()))
for value, label in py2reqif(hierarch.toDict()).items():
if value == "typeRef":
typeXml = createSubElement(specHierarchRootXml, "TYPE")
createSubElement(typeXml, "SPECIFICATION-TYPE-REF", label)
elif value not in attributesForElements:
createSubElement(specHierarchRootXml, value, label)
for child in hierarch.children:
createChildHirachy(specHierarchRootXml, child)
f.write(etree.tostring(root, pretty_print=True, xml_declaration=True))
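# Illustrative round trip (added sketch, not part of the original module);
# the file names are placeholders. load() accepts both RIF and ReqIF input,
# while dump() always emits ReqIF, so this converts RIF documents to ReqIF.
#
#     import pyreqif.rif
#     with open("input.rif", "rb") as src:          # hypothetical path
#         document = pyreqif.rif.load(src)
#     with open("output.reqif", "wb") as dst:       # hypothetical path
#         pyreqif.rif.dump(document, dst)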
| {
"content_hash": "944966f9c548f85fcc9597398968ca88",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 186,
"avg_line_length": 46.58955223880597,
"alnum_prop": 0.5665865769662022,
"repo_name": "ebroecker/pyreqif",
"id": "440989e217f36e852c1252417ae39a1ab328a15f",
"size": "31262",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/pyreqif/rif.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "184238"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
import gym
import numpy as np
import random
import time
import unittest
from collections import Counter
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.policy.tests.test_policy import TestPolicy
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.env.vector_env import VectorEnv
from ray.tune.registry import register_env
class MockPolicy(TestPolicy):
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
return [random.choice([0, 1])] * len(obs_batch), [], {}
def postprocess_trajectory(self,
batch,
other_agent_batches=None,
episode=None):
assert episode is not None
return compute_advantages(
batch, 100.0, 0.9, use_gae=False, use_critic=False)
class BadPolicy(MockPolicy):
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
raise Exception("intentional error")
class FailOnStepEnv(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
raise ValueError("kaboom")
def step(self, action):
raise ValueError("kaboom")
class MockEnv(gym.Env):
def __init__(self, episode_length, config=None):
self.episode_length = episode_length
self.config = config
self.i = 0
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return 0, 1, self.i >= self.episode_length, {}
class MockEnv2(gym.Env):
def __init__(self, episode_length):
self.episode_length = episode_length
self.i = 0
self.observation_space = gym.spaces.Discrete(100)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return self.i, 100, self.i >= self.episode_length, {}
class MockVectorEnv(VectorEnv):
def __init__(self, episode_length, num_envs):
self.envs = [MockEnv(episode_length) for _ in range(num_envs)]
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
self.num_envs = num_envs
def vector_reset(self):
return [e.reset() for e in self.envs]
def reset_at(self, index):
return self.envs[index].reset()
def vector_step(self, actions):
obs_batch, rew_batch, done_batch, info_batch = [], [], [], []
for i in range(len(self.envs)):
obs, rew, done, info = self.envs[i].step(actions[i])
obs_batch.append(obs)
rew_batch.append(rew)
done_batch.append(done)
info_batch.append(info)
return obs_batch, rew_batch, done_batch, info_batch
def get_unwrapped(self):
return self.envs
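# Added sketch (not part of the original tests): the VectorEnv protocol the
# mock implements steps every sub-environment at once and returns
# per-environment lists.
#
#     venv = MockVectorEnv(episode_length=3, num_envs=2)
#     obs = venv.vector_reset()                       # [0, 0]
#     obs, rews, dones, infos = venv.vector_step([0, 1])
#     # obs == [0, 0], rews == [1, 1], dones == [False, False]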
class TestRolloutWorker(unittest.TestCase):
def test_basic(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
batch = ev.sample()
for key in [
"obs", "actions", "rewards", "dones", "advantages",
"prev_rewards", "prev_actions"
]:
self.assertIn(key, batch)
self.assertGreater(np.abs(np.mean(batch[key])), 0)
def to_prev(vec):
out = np.zeros_like(vec)
for i, v in enumerate(vec):
if i + 1 < len(out) and not batch["dones"][i]:
out[i + 1] = v
return out.tolist()
self.assertEqual(batch["prev_rewards"].tolist(),
to_prev(batch["rewards"]))
self.assertEqual(batch["prev_actions"].tolist(),
to_prev(batch["actions"]))
self.assertGreater(batch["advantages"][0], 1)
def test_batch_ids(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
batch1 = ev.sample()
batch2 = ev.sample()
self.assertEqual(len(set(batch1["unroll_id"])), 1)
self.assertEqual(len(set(batch2["unroll_id"])), 1)
self.assertEqual(
len(set(SampleBatch.concat(batch1, batch2)["unroll_id"])), 2)
def test_global_vars_update(self):
agent = A2CTrainer(
env="CartPole-v0",
config={
"lr_schedule": [[0, 0.1], [400, 0.000001]],
})
result = agent.train()
self.assertGreater(result["info"]["learner"]["cur_lr"], 0.01)
result2 = agent.train()
self.assertLess(result2["info"]["learner"]["cur_lr"], 0.0001)
def test_no_step_on_init(self):
register_env("fail", lambda _: FailOnStepEnv())
pg = PGTrainer(env="fail", config={"num_workers": 1})
self.assertRaises(Exception, lambda: pg.train())
def test_callbacks(self):
counts = Counter()
pg = PGTrainer(
env="CartPole-v0", config={
"num_workers": 0,
"sample_batch_size": 50,
"train_batch_size": 50,
"callbacks": {
"on_episode_start": lambda x: counts.update({"start": 1}),
"on_episode_step": lambda x: counts.update({"step": 1}),
"on_episode_end": lambda x: counts.update({"end": 1}),
"on_sample_end": lambda x: counts.update({"sample": 1}),
},
})
pg.train()
pg.train()
pg.train()
pg.train()
self.assertEqual(counts["sample"], 4)
self.assertGreater(counts["start"], 0)
self.assertGreater(counts["end"], 0)
self.assertGreater(counts["step"], 200)
self.assertLess(counts["step"], 400)
def test_query_evaluators(self):
register_env("test", lambda _: gym.make("CartPole-v0"))
pg = PGTrainer(
env="test",
config={
"num_workers": 2,
"sample_batch_size": 5,
"num_envs_per_worker": 2,
})
results = pg.workers.foreach_worker(lambda ev: ev.sample_batch_size)
results2 = pg.workers.foreach_worker_with_index(
lambda ev, i: (i, ev.sample_batch_size))
results3 = pg.workers.foreach_worker(
lambda ev: ev.foreach_env(lambda env: 1))
self.assertEqual(results, [10, 10, 10])
self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])
self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])
def test_reward_clipping(self):
# clipping on
ev = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=True,
batch_mode="complete_episodes")
self.assertEqual(max(ev.sample()["rewards"]), 1)
result = collect_metrics(ev, [])
self.assertEqual(result["episode_reward_mean"], 1000)
# clipping off
ev2 = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=False,
batch_mode="complete_episodes")
self.assertEqual(max(ev2.sample()["rewards"]), 100)
result2 = collect_metrics(ev2, [])
self.assertEqual(result2["episode_reward_mean"], 1000)
def test_hard_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
batch_steps=10,
episode_horizon=4,
soft_horizon=False)
samples = ev.sample()
# three logical episodes
self.assertEqual(len(set(samples["eps_id"])), 3)
# 3 done values
self.assertEqual(sum(samples["dones"]), 3)
def test_soft_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
batch_steps=10,
episode_horizon=4,
soft_horizon=True)
samples = ev.sample()
# three logical episodes
self.assertEqual(len(set(samples["eps_id"])), 3)
# only 1 hard done value
self.assertEqual(sum(samples["dones"]), 1)
def test_metrics(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
remote_ev = RolloutWorker.as_remote().remote(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
ev.sample()
ray.get(remote_ev.sample.remote())
result = collect_metrics(ev, [remote_ev])
self.assertEqual(result["episodes_this_iter"], 20)
self.assertEqual(result["episode_reward_mean"], 10)
def test_async(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
sample_async=True,
policy=MockPolicy)
batch = ev.sample()
for key in ["obs", "actions", "rewards", "dones", "advantages"]:
self.assertIn(key, batch)
self.assertGreater(batch["advantages"][0], 1)
def test_auto_vectorization(self):
ev = RolloutWorker(
env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),
policy=MockPolicy,
batch_mode="truncate_episodes",
batch_steps=2,
num_envs=8)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
indices = []
for env in ev.async_env.vector_env.envs:
self.assertEqual(env.unwrapped.config.worker_index, 0)
indices.append(env.unwrapped.config.vector_index)
self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])
def test_batches_larger_when_vectorized(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
batch_steps=4,
num_envs=4)
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
batch = ev.sample()
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 4)
def test_vector_env_support(self):
ev = RolloutWorker(
env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
batch_steps=10)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
def test_truncate_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
batch_steps=15,
batch_mode="truncate_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 15)
def test_complete_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
batch_steps=5,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 10)
def test_complete_episodes_packing(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
batch_steps=15,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 20)
self.assertEqual(
batch["t"].tolist(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_filter_sync(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
def test_get_filters(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
self.sample_and_flush(ev)
filters = ev.get_filters(flush_after=False)
time.sleep(2)
filters2 = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
obs_f2 = filters2[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)
self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)
def test_sync_filter(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
obs_f = self.sample_and_flush(ev)
# Current State
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertLessEqual(obs_f.buffer.n, 20)
new_obsf = obs_f.copy()
new_obsf.rs._n = 100
ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f.rs.n, 100)
self.assertLessEqual(obs_f.buffer.n, 20)
def sample_and_flush(self, ev):
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
return obs_f
if __name__ == "__main__":
ray.init(num_cpus=5)
unittest.main(verbosity=2)
| {
"content_hash": "fb9b8b7c58fcf40d619b2e5875916cec",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 79,
"avg_line_length": 35.56192660550459,
"alnum_prop": 0.563818123186069,
"repo_name": "stephanie-wang/ray",
"id": "8cc52f3dc34903c711e36687be6a539bebce6e58",
"size": "15505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/tests/test_rollout_worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
from flask_restful import Resource, reqparse
class Test(Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument('id', type=int)
        super(Test, self).__init__()
    def get(self):
        args = self.parser.parse_args()
        return {'id': args['id']}
def post(self):
pass
def put(self):
pass
def delete(self):
        pass
| {
"content_hash": "0beb748ec498d6958726c29d971e19b3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 48,
"avg_line_length": 19.25,
"alnum_prop": 0.5688311688311688,
"repo_name": "chenke91/ckPermission",
"id": "59089f3e817e631a2c324dff33bf47d4b0baa3b9",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api_v1/resources/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1600523"
},
{
"name": "HTML",
"bytes": "88441"
},
{
"name": "JavaScript",
"bytes": "691180"
},
{
"name": "Python",
"bytes": "35851"
}
],
"symlink_target": ""
} |
"""
Univariate lowess function, like in R.
References
----------
Hastie, Tibshirani, Friedman. (2009) The Elements of Statistical Learning: Data Mining, Inference, and Prediction, Second Edition: Chapter 6.
Cleveland, W.S. (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots". Journal of the American Statistical Association 74 (368): 829-836.
"""
import numpy as np
from scipy.linalg import lstsq
def lowess(endog, exog, frac = 2./3, it = 3):
"""
LOWESS (Locally Weighted Scatterplot Smoothing)
    A lowess function that outputs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog: 1-D numpy array
The y-values of the observed points
exog: 1-D numpy array
The x-values of the observed points
frac: float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it: int
The number of residual-based reweightings
to perform.
Returns
-------
out: numpy array
A numpy array with two columns. The first column
is the sorted x values and the second column the
associated estimated y-values.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the true y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
    is the _lowess_tricube function applied to `|x_i-x_j|`.
    If it > 0, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with df>2,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
Some experimentation is likely required to find a good
    choice of frac and it for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
    >>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y,x)
>>> w = lowess(y,x, frac=1./3)
This gives a similar comparison for when it is 0 vs not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
    >>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y,x, frac= 1./3, it=0)
>>> w = lowess(y,x, frac=1./3)
"""
x = exog
y = endog
if exog.ndim != 1:
raise ValueError('exog must be a vector')
if endog.ndim != 1:
raise ValueError('endog must be a vector')
    if endog.shape[0] != x.shape[0]:
raise ValueError('exog and endog must have same length')
n = exog.shape[0]
fitted = np.zeros(n)
k = int(frac * n)
index_array = np.argsort(exog)
x_copy = np.array(exog[index_array]) #, dtype ='float32')
y_copy = endog[index_array]
fitted, weights = _lowess_initial_fit(x_copy, y_copy, k, n)
for i in xrange(it):
_lowess_robustify_fit(x_copy, y_copy, fitted,
weights, k, n)
out = np.array([x_copy, fitted]).T
out.shape = (n,2)
return out
def _lowess_initial_fit(x_copy, y_copy, k, n):
"""
The initial weighted local linear regression for lowess.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
fitted : 1-d ndarray
The fitted y-values
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values
"""
weights = np.zeros((n,k), dtype = x_copy.dtype)
nn_indices = [0,k]
X = np.ones((k,2))
fitted = np.zeros(n)
for i in xrange(n):
#note: all _lowess functions are inplace, no return
left_width = x_copy[i] - x_copy[nn_indices[0]]
right_width = x_copy[nn_indices[1]-1] - x_copy[i]
width = max(left_width, right_width)
_lowess_wt_standardize(weights[i,:],
x_copy[nn_indices[0]:nn_indices[1]],
x_copy[i], width)
_lowess_tricube(weights[i,:])
weights[i,:] = np.sqrt(weights[i,:])
X[:,1] = x_copy[nn_indices[0]:nn_indices[1]]
y_i = weights[i,:] * y_copy[nn_indices[0]:nn_indices[1]]
beta = lstsq(weights[i,:].reshape(k,1) * X, y_i)[0]
fitted[i] = beta[0] + beta[1]*x_copy[i]
_lowess_update_nn(x_copy, nn_indices, i+1)
return fitted, weights
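# Illustrative call (added sketch, not in the original source): with ten
# sorted x-values and k = 5 neighbours, the helper returns the first-pass
# fit and the n-by-k distance weights reused by the robustifying passes.
#
#     x = np.linspace(0., 1., 10)
#     y = x ** 2
#     fitted, weights = _lowess_initial_fit(x, y, 5, 10)
#     # fitted.shape == (10,), weights.shape == (10, 5)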
def _lowess_wt_standardize(weights, new_entries, x_copy_i, width):
"""
The initial phase of creating the weights.
Subtract the current x_i and divide by the width.
Parameters
----------
weights : ndarray
The memory where (new_entries - x_copy_i)/width will be placed
new_entries : ndarray
The x-values of the k closest points to x[i]
x_copy_i : float
x[i], the i'th point in the (sorted) x values
width : float
The maximum distance between x[i] and any point in new_entries
Returns
-------
Nothing. The modifications are made to weight in place.
"""
weights[:] = new_entries
weights -= x_copy_i
weights /= width
def _lowess_robustify_fit(x_copy, y_copy, fitted, weights, k, n):
"""
    Additional weighted local linear regressions, performed if
    it > 0. They take into account the sizes of the residuals,
to eliminate the effect of extreme outliers.
Parameters
----------
x_copy : 1-d ndarray
The x-values/exogenous part of the data being smoothed
y_copy : 1-d ndarray
The y-values/ endogenous part of the data being smoothed
fitted : 1-d ndarray
The fitted y-values from the previous iteration
weights : 2-d ndarray
An n by k array. The contribution to the weights in the
local linear fit coming from the distances between the
x-values
k : int
The number of data points which affect the linear fit for
each estimated point
n : int
The total number of points
Returns
-------
Nothing. The fitted values are modified in place.
"""
nn_indices = [0,k]
X = np.ones((k,2))
residual_weights = np.copy(y_copy)
residual_weights.shape = (n,)
residual_weights -= fitted
residual_weights = np.absolute(residual_weights)#, out=residual_weights)
s = np.median(residual_weights)
residual_weights /= (6*s)
too_big = residual_weights>=1
_lowess_bisquare(residual_weights)
residual_weights[too_big] = 0
for i in xrange(n):
total_weights = weights[i,:] * residual_weights[nn_indices[0]:
nn_indices[1]]
X[:,1] = x_copy[nn_indices[0]:nn_indices[1]]
y_i = total_weights * y_copy[nn_indices[0]:nn_indices[1]]
total_weights.shape = (k,1)
beta = lstsq(total_weights * X, y_i)[0]
fitted[i] = beta[0] + beta[1] * x_copy[i]
_lowess_update_nn(x_copy, nn_indices, i+1)
def _lowess_update_nn(x, cur_nn,i):
"""
Update the endpoints of the nearest neighbors to
the ith point.
Parameters
----------
x : iterable
The sorted points of x-values
cur_nn : list of length 2
The two current indices between which are the
k closest points to x[i]. (The actual value of
    k is irrelevant for the algorithm.)
i : int
The index of the current value in x for which
the k closest points are desired.
Returns
-------
Nothing. It modifies cur_nn in place.
"""
while True:
if cur_nn[1]<x.size:
left_dist = x[i] - x[cur_nn[0]]
new_right_dist = x[cur_nn[1]] - x[i]
if new_right_dist < left_dist:
cur_nn[0] = cur_nn[0] + 1
cur_nn[1] = cur_nn[1] + 1
else:
break
else:
break
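# Worked example (added for illustration): sliding a k = 2 window toward
# x[3] = 10 shifts it off the leftmost points until the two nearest
# neighbours {10, 11} remain.
#
#     x = np.array([0., 1., 2., 10., 11.])
#     nn = [0, 2]                  # window currently covers x[0:2]
#     _lowess_update_nn(x, nn, 3)
#     # nn == [3, 5]               # window now covers x[3:5]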
def _lowess_tricube(t):
"""
The _tricube function applied to a numpy array.
The tricube function is (1-abs(t)**3)**3.
Parameters
----------
t : ndarray
Array the tricube function is applied to elementwise and
in-place.
Returns
-------
Nothing
"""
#t = (1-np.abs(t)**3)**3
t[:] = np.absolute(t) #, out=t) #numpy version?
_lowess_mycube(t)
t[:] = np.negative(t) #, out = t)
t += 1
_lowess_mycube(t)
def _lowess_mycube(t):
"""
Fast matrix cube
Parameters
----------
t : ndarray
Array that is cubed, elementwise and in-place
Returns
-------
Nothing
"""
#t **= 3
t2 = t*t
t *= t2
def _lowess_bisquare(t):
"""
The bisquare function applied to a numpy array.
The bisquare function is (1-t**2)**2.
Parameters
----------
t : ndarray
array bisquare function is applied to, element-wise and in-place.
Returns
-------
Nothing
"""
#t = (1-t**2)**2
t *= t
t[:] = np.negative(t) #, out=t)
t += 1
t *= t
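# Sanity check (added for illustration): the in-place kernels above should
# agree with the direct formulas they replace.
#
#     t = np.linspace(-0.9, 0.9, 5)
#     u = t.copy(); _lowess_tricube(u)
#     assert np.allclose(u, (1 - np.abs(t) ** 3) ** 3)
#     v = t.copy(); _lowess_bisquare(v)
#     assert np.allclose(v, (1 - t ** 2) ** 2)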
| {
"content_hash": "566d889eb62bc613bdde192c89c3fb88",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 154,
"avg_line_length": 27.53280839895013,
"alnum_prop": 0.5989513822688275,
"repo_name": "pprett/statsmodels",
"id": "bf1d675f6f2c43f5ebabf38a3c3a0df7ab5b3838",
"size": "10490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/nonparametric/smoothers_lowess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "JavaScript",
"bytes": "11143"
},
{
"name": "Python",
"bytes": "4135946"
},
{
"name": "R",
"bytes": "5412"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_metadata():
import re
with open(os.path.join("filechooser", "__init__.py")) as f:
return dict(re.findall("__([a-z]+)__ = ['\"]([^'\"]+)['\"]", f.read()))
metadata = get_metadata()
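# For illustration (not part of the original script): the regex collects
# every `__name__ = "value"` pair, so an __init__.py such as
#
#     __version__ = "0.1.0"
#     __author__ = "Jane Doe"
#
# yields {'version': '0.1.0', 'author': 'Jane Doe'}.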
if sys.argv[-1] == 'publish':
os.system('cd docs && make html')
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (metadata['version'], metadata['version']))
print(" git push --tags")
sys.exit()
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
setup(
name='django-filechooser',
version=metadata['version'],
description="""jQuery Filechooser for Django projects""",
long_description=readme + '\n\n' + history,
author='Martin van Wingerden',
author_email='[email protected]',
url='https://github.com/martinvw/django-filechooser',
download_url="https://github.com/martinvw/django-filechooser/archive/v"+metadata['version']+".tar.gz",
packages=[
'filechooser',
],
include_package_data=True,
install_requires=['humanize>=0.5.0'],
license="MIT",
zip_safe=False,
keywords='django-filechooser',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Environment :: Web Environment',
'Framework :: Django',
],
)
| {
"content_hash": "7790d54b3c96e6fc330f4b433ca37bb2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 106,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.6146694214876033,
"repo_name": "martinvw/django-filechooser",
"id": "eb99481ac391149e288aade45927a659aa44b911",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "526"
},
{
"name": "JavaScript",
"bytes": "3633"
},
{
"name": "Python",
"bytes": "15477"
}
],
"symlink_target": ""
} |
"""Implementation of paginate query."""
import sqlalchemy
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# copied from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
    Typically, the id of the last row is used as the client-facing pagination
    marker; the actual marker object must then be fetched from the db and
    passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise exception.InvalidInput(reason='Invalid sort key')
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in xrange(0, len(sort_keys)):
crit_attrs = []
for j in xrange(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
elif sort_dirs[i] == 'asc':
crit_attrs.append((model_attr > marker_values[i]))
else:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
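# Illustrative usage (added sketch, not part of the original module);
# `Volume` and `session` are placeholders for a mapped model and an active
# SQLAlchemy session.
#
#     query = session.query(Volume)
#     # First page: 20 rows ordered by (created_at desc, id asc).
#     page = paginate_query(query, Volume, limit=20,
#                           sort_keys=['created_at', 'id'],
#                           sort_dirs=['desc', 'asc']).all()
#     # Next page: pass the last row of the previous page as the marker.
#     marker = page[-1]
#     next_page = paginate_query(query, Volume, limit=20,
#                                sort_keys=['created_at', 'id'],
#                                marker=marker,
#                                sort_dirs=['desc', 'asc']).all()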
| {
"content_hash": "68c5837312936d43006de914f0c37281",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 37.30275229357798,
"alnum_prop": 0.6276438760452533,
"repo_name": "github-borat/cinder",
"id": "f2c5f4534bb655e7900b65b9c14dabfb9bf73f4b",
"size": "4879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/common/sqlalchemyutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6575951"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
} |
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import auto_control_deps_utils as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import remote_call
# pylint: enable=unused-import
from tensorflow.python.ops.gen_functional_ops import symbolic_gradient
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
@tf_export(v1=["foldl"])
@dispatch.add_dispatch_support
def foldl(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn, and the second is the value at the current
position of `elems`. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from first
to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = tf.constant([1, 2, 3, 4, 5, 6])
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldl", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (
tensor_shape.dimension_value(elems_flat[0].shape[0]) or
array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
a = nest.map_structure(lambda elem: elem.read(0), elems_ta)
i = constant_op.constant(1)
else:
a = initializer
i = constant_op.constant(0)
def compute(i, a):
elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a = fn(a, elem_i)
return [i + 1, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
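# Illustrative sketch (editor's addition): the multi-arity form described in
# the docstring above, folding over a pair of tensors in lock step. The
# names here are hypothetical.
def _example_foldl_multi_arity():
  xs = constant_op.constant([1.0, 2.0, 3.0])
  ys = constant_op.constant([10.0, 20.0, 30.0])
  # The accumulator starts at 0.0; each step adds x * y, so the result is
  # 1*10 + 2*20 + 3*30 = 140.0.
  return foldl(lambda acc, xy: acc + xy[0] * xy[1], (xs, ys),
               initializer=constant_op.constant(0.0))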
@tf_export("foldl", v1=[])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"""back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.foldl(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.foldl(fn, elems))""",
warn_once=True,
back_prop=False)
def foldl_v2(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn, and the second is the value at the current
position of `elems`. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) Deprecated. False disables support for back
propagation. Prefer using `tf.stop_gradient` instead.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from first
to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = tf.constant([1, 2, 3, 4, 5, 6])
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
return foldl(
fn=fn,
elems=elems,
initializer=initializer,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
name=name)
@tf_export(v1=["foldr"])
@dispatch.add_dispatch_support
def foldr(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn, and the second is the value at the current position of
`elems`. If `initializer` is None, `elems` must contain at least one element,
and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally and not
# issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (
tensor_shape.dimension_value(elems_flat[0].shape[0]) or
array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
a = initializer
def compute(i, a):
i -= 1
elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a_out = fn(a, elem)
return [i, a_out]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("foldr", v1=[])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"""back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.foldr(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.foldr(fn, elems))""",
warn_once=True,
back_prop=False)
def foldr_v2(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn, and the second is the value at the current position of
`elems`. If `initializer` is None, `elems` must contain at least one element,
and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) Deprecated. False disables support for back
propagation. Prefer using `tf.stop_gradient` instead.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
return foldr(
fn=fn,
elems=elems,
initializer=initializer,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
name=name)
@tf_export(v1=["scan"])
@dispatch.add_dispatch_support
def scan(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
infer_shape=True,
reverse=False,
name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
See also `tf.map_fn`.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn, and the second is the value at the current
position of `elems`. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
  If `reverse=True`, it's `fn(initializer, values[-1]).shape`.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first will
have the same structure as `initializer` if one is provided, otherwise it
will have the same structure as `elems`. The second will have the same
(possibly nested) structure as `elems`. Its output must have the same
structure as `initializer` if one is provided, otherwise it must have the
same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
reverse: (optional) True scans the tensor last to first (instead of first to
last).
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last (or
last to first, if `reverse=True`).
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
sum = scan(lambda a, x: a + x, elems, reverse=True)
# sum == [21, 20, 18, 15, 11, 6]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if initializer is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(initializer)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(initializer, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "scan", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat
]
# Convert elems to tensor array. n may be known statically.
n = tensor_shape.dimension_value(elems_flat[0].shape[0])
if n is None:
n = array_ops.shape(elems_flat[0])[0]
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(
dtype=elem.dtype,
size=n,
dynamic_size=False,
element_shape=elem.shape[1:],
infer_shape=True) for elem in elems_flat
]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)
]
if initializer is None:
a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta]
i = 1
else:
initializer_flat = output_flatten(initializer)
a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
i = 0
# Create a tensor array to store the intermediate values.
accs_ta = [
tensor_array_ops.TensorArray(
dtype=init.dtype,
size=n,
element_shape=init.shape if infer_shape else None,
dynamic_size=False,
infer_shape=infer_shape) for init in a_flat
]
if initializer is None:
accs_ta = [
acc_ta.write(n - 1 if reverse else 0, a)
for (acc_ta, a) in zip(accs_ta, a_flat)
]
def compute(i, a_flat, tas):
"""The loop body of scan.
Args:
i: the loop counter.
a_flat: the accumulator value(s), flattened.
tas: the output accumulator TensorArray(s), flattened.
Returns:
[i + 1, a_flat, tas]: the updated counter + new accumulator values +
updated TensorArrays
Raises:
TypeError: if initializer and fn() output structure do not match
        ValueError: if initializer and fn() output lengths do not match
"""
packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_a = output_pack(a_flat)
a_out = fn(packed_a, packed_elems)
nest.assert_same_structure(elems if initializer is None else initializer,
a_out)
flat_a_out = output_flatten(a_out)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
if reverse:
next_i = i - 1
else:
next_i = i + 1
return (next_i, flat_a_out, tas)
if reverse:
initial_i = n - 1 - i
condition = lambda i, _1, _2: i >= 0
else:
initial_i = i
condition = lambda i, _1, _2: i < n
_, _, r_a = control_flow_ops.while_loop(
condition,
compute, (initial_i, a_flat, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(
tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(
tensor_shape.Dimension(
tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(
tensor_shape.TensorShape(n_static).concatenate(r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
@tf_export("scan", v1=[])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"""back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.scan(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.scan(fn, elems))""",
warn_once=True,
back_prop=False)
def scan_v2(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
infer_shape=True,
reverse=False,
name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn, and the second is the value at the current
position of `elems`. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
  If `reverse=True`, it's `fn(initializer, values[-1]).shape`.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first will
have the same structure as `initializer` if one is provided, otherwise it
will have the same structure as `elems`. The second will have the same
(possibly nested) structure as `elems`. Its output must have the same
structure as `initializer` if one is provided, otherwise it must have the
same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) Deprecated. False disables support for back
propagation. Prefer using `tf.stop_gradient` instead.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
reverse: (optional) True scans the tensor last to first (instead of first to
last).
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last (or
last to first, if `reverse=True`).
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
sum = scan(lambda a, x: a + x, elems, reverse=True)
# sum == [21, 20, 18, 15, 11, 6]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
return scan(
fn=fn,
elems=elems,
initializer=initializer,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
infer_shape=infer_shape,
reverse=reverse,
name=name)
# pylint: disable=invalid-name
def If(cond, inputs, then_branch, else_branch, name=None):
r"""output = Cond(inputs) ?
then_branch(inputs) : else_branch(inputs).
Args:
cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is
converted to a boolean according to the following rule: if the scalar is a
numerical value, non-zero means True and zero means False; if the scalar
is a string, non-empty means True and empty means False.
inputs: A list of input tensors.
    then_branch: A function that takes 'inputs' and returns a list of
      tensors, whose types are the same as what else_branch returns.
    else_branch: A function that takes 'inputs' and returns a list of
      tensors, whose types are the same as what then_branch returns.
name: A name for the operation (optional).
Returns:
A list of tensors returned by either then_branch(inputs)
or else_branch(inputs).
"""
# pylint: disable=protected-access
if isinstance(then_branch, function._DefinedFunction):
tlist = [_.type for _ in then_branch.definition.signature.output_arg]
else:
# We assume that `then_branch` is a ConcreteFunction here.
tlist = nest.flatten(then_branch.output_dtypes)
return gen_functional_ops._if(
cond, inputs, tlist, then_branch, else_branch, name=name)
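# Illustrative sketch (editor's addition): calling `If` with two
# `function.Defun`-defined branches. The names below are hypothetical.
def _example_if():
  @function.Defun(dtypes.float32)
  def TwiceF(x):
    return x * 2.0

  @function.Defun(dtypes.float32)
  def HalfF(x):
    return x * 0.5

  x = constant_op.constant(8.0)
  # Evaluates TwiceF(x) because cond is True; returns [16.0].
  return If(constant_op.constant(True), [x], TwiceF, HalfF)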
def Gradient(inputs, f, name=None):
r"""Computes the gradient function for function f via backpropagation.
Args:
inputs: A list of tensors of size N + M.
    f: The function we want to compute the gradient for. The function 'f'
      must be a numerical function which takes N inputs and produces M
      outputs. Its gradient function 'g' takes N + M inputs and produces N
      outputs. I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ..., xN), then
      g computes (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1,
      dL/dy2, ..., dL/dyM), where L is a scalar-valued function of
      (x1, x2, ..., xN) (e.g., the loss function), and dL/dxi is the partial
      derivative of L with respect to xi.
name: A name for the operation (optional).
Returns:
A list of tensors of size N.
"""
# TODO(zhifengc): Pretty-print the above spec in latex.
  # TODO(zhifengc): Needs a math expert to state the comment above better.
tlist = [_.type for _ in f.definition.signature.input_arg]
return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)
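# Illustrative sketch (editor's addition): with f(x) = x * x (N=1 input,
# M=1 output), the gradient function g(x, dL/dy) returns [2 * x * dL/dy].
# The names below are hypothetical.
def _example_gradient():
  @function.Defun(dtypes.float32)
  def Square(x):
    return x * x

  x = constant_op.constant(3.0)
  dy = constant_op.constant(1.0)  # the dL/dy seed
  # Returns [dL/dx] = [2 * 3.0 * 1.0] = [6.0].
  return Gradient([x, dy], Square)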
def _GetInputDtypes(func):
"""Returns the input dtypes of func, excluding dtypes for captured inputs."""
if isinstance(func, function._DefinedFunction): # pylint: disable=protected-access
return func.declared_input_types
  # We assume that `func` is a ConcreteFunction here, but we are not able to
  # verify this, since importing the eager function library would cause a
  # cyclic dependency.
#
# ConcreteFunction.inputs includes captured inputs.
num_non_captured_inputs = len(func.inputs) - len(func.captured_inputs)
inputs_without_captured = func.inputs[:num_non_captured_inputs]
return [t.dtype for t in inputs_without_captured]
def _LoopBodyCaptureWrapper(func):
"""Returns a wrapper for `func` that handles loop-carried captured inputs."""
@function.Defun(*_GetInputDtypes(func), func_name="%s_Wrapper" % func.name)
def Wrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
result = func(*args)
extra_args = tuple(function.get_extra_args())
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(result, ops.Operation):
return extra_args
# Unary functions return a single Tensor value.
elif not isinstance(result, (list, tuple)):
return (result,) + extra_args
# N-ary functions return a tuple of Tensors.
else:
return result + type(result)(extra_args)
return Wrapper
# pylint: disable=invalid-name,protected-access
def While(input_, cond, body, name=None, hostmem=None):
r"""output = input; While (Cond(output)) { output = Body(output) }.
Args:
input_: A list of `Tensor` objects. A list of input tensors whose types are
T.
    cond: A function that takes 'input' and returns a tensor. If the tensor
      is a non-boolean scalar, the scalar is converted to a boolean
      according to the following rule: if the scalar is a numerical value,
      non-zero means True and zero means False; if the scalar is a string,
      non-empty means True and empty means False. If the tensor is not a
      scalar, non-emptiness means True and emptiness means False.
    body: A function that takes a list of tensors and returns another list
      of tensors. Both lists have the same types as specified by T.
name: A name for the operation (optional).
    hostmem: A list of integers. If i is in the list, input[i] is a host
      memory tensor.
Raises:
ValueError: if `cond` has implicitly captured inputs or if `cond` and `body`
have different signatures.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if cond.captured_inputs:
raise ValueError("While op 'cond' argument must be a function "
"without implicitly captured inputs.")
cond_input_types = _GetInputDtypes(cond)
body_input_types = _GetInputDtypes(body)
if cond_input_types != body_input_types:
raise ValueError(
"While op 'cond' and 'body' signatures do not match. %r vs %r" %
(cond_input_types, body_input_types))
if body.captured_inputs:
cond_dtypes = list(body_input_types) + [
t.dtype for t in body.captured_inputs
]
@function.Defun(*cond_dtypes, func_name="%s_Wrapper" % cond.name)
def CondWrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
return cond(*args[:len(body_input_types)])
ret = gen_functional_ops._while(
input_ + body.captured_inputs,
CondWrapper,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._while(input_, cond, body, name=name)
if hostmem:
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
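# Illustrative sketch (editor's addition): a simple counter loop through
# `While`, with `Defun`-defined cond and body of matching signatures. The
# names below are hypothetical.
def _example_while():
  @function.Defun(dtypes.int32)
  def Cond(x):
    return x < 10

  @function.Defun(dtypes.int32)
  def Body(x):
    return x + 1

  # Counts up from 0 until the condition fails; returns [10].
  return While([constant_op.constant(0)], Cond, Body)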
# b/36459430
#
# Ideally, we would not need to rewrite a For loop into a While loop.
# However, today, if a While runs on GPU and the condition returns a
# boolean, the While kernel crashes. Even if we fix the crash, the
# bool needs to be copied between GPU and CPU. So, a for loop is much
# preferred when running on GPU.
#
# On the other hand, the For op has no direct XLA kernel. So, when we run
# a for loop, we need to rewrite it using a While op.
#
# It should be possible and probably better to write an XLA C++ kernel
# implementing the logic in _ForUsingWhile.
def _ForUsingWhile(start,
limit,
delta,
inputs,
forbody,
name=None,
hostmem=None):
"""Helper to implement a For loop using a While."""
# To support negative delta (e.g., range(100, 0, -3)), we iterate
# over the range(n) and use iter * delta + start as the real
# iteration index. (e.g., for i in range(34): iter = i * (-3) +
# 100).
d = math_ops.abs(delta)
# XLA on TPUs doesn't support integer division
n = math_ops.cast(
math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) /
math_ops.cast(d, dtypes.float32), dtypes.int32)
# Carried loop variables ("extra_args") are implicitly added to the input list
# of the WhileBody function. WhileCond does not call forbody, and so does not
# depend on any of forbody's extra_args. Since WhileCond and WhileBody
# must have identical inputs, we have to augment the cond signature to take
# the same types as the carried loop variables.
body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:]
cond_name = "%s_Cond" % forbody.name
@function.Defun(*body_sig, func_name=cond_name)
def WhileCond(i, n, *args):
del args
return i < n
body_name = "%s_Body" % forbody.name
@function.Defun(*body_sig, func_name=body_name)
def WhileBody(i, n, start, delta, *args):
"""A While wrapper for forbody that handles loop-carried captured inputs."""
for_result = forbody(start + i * delta, *args)
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(for_result, ops.Operation):
for_result = ()
# Unary functions return a single Tensor value.
elif isinstance(for_result, ops.Tensor):
for_result = (for_result,)
return (i + 1, n, start, delta) + tuple(for_result)
if hostmem is not None:
hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem]
else:
hostmem = [0, 1, 2, 3]
results = While(
input_=[0, n, start, delta] + inputs,
cond=WhileCond,
body=WhileBody,
name=name,
hostmem=hostmem)
# Slice off the loop-carried captured inputs.
return list(results[4:len(results)])
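# Illustrative check (editor's addition) of the iteration-count formula
# used above, in pure Python, for the docstring's range(100, 0, -3) case.
def _example_for_iteration_count():
  start, limit, delta = 100, 0, -3
  n = (abs(limit - start) + abs(delta) - 1) // abs(delta)
  assert n == len(range(start, limit, delta))  # both are 34
  return n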
def For(start,
limit,
delta,
inputs,
body,
name=None,
hostmem=None,
rewrite_with_while=None):
r"""out = input; for i in range(start, limit, delta) out = body(i, out).
Args:
start: A `Tensor` of type `int32`.
limit: A `Tensor` of type `int32`.
delta: A `Tensor` of type `int32`.
inputs: A list of `Tensor` objects. A list of input tensors whose types are
T.
body: A function takes a list of tensors and returns another list of
tensors. Both lists have the same types as (int32, T...).
name: A name for the operation (optional).
    hostmem: A list of integers. If i is in the list, inputs[i] is a host
      memory tensor. In other words, the (i+1)-th argument of the body
      function expects a host memory tensor.
    rewrite_with_while: If True, use a While op to implement the For loop.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if rewrite_with_while:
return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)
if body.captured_inputs:
ret = gen_functional_ops._for(
start,
limit,
delta,
inputs + body.captured_inputs,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)
if hostmem:
num_for_params = 3 # start/limit/delta
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend([num_for_params + i for i in hostmem])
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
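# Illustrative sketch (editor's addition): a `For` loop with one carried
# tensor. Per the docstring, the body takes (int32 index, T...) and returns
# tensors of types T. The names below are hypothetical.
def _example_for():
  @function.Defun(dtypes.int32, dtypes.float32)
  def Body(i, acc):
    return acc + math_ops.cast(i, dtypes.float32)

  # Sums 0 + 1 + ... + 9 into the carried tensor; returns [45.0].
  return For(start=0, limit=10, delta=1,
             inputs=[constant_op.constant(0.0)], body=Body)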
# pylint: enable=invalid-name,protected-access
def partitioned_call(args,
f,
tout=None,
executing_eagerly=None,
config=None,
executor_type=None):
"""Executes a function while respecting device annotations.
Currently, only those functions that execute within the same address space
can be executed.
Args:
args: The arguments of the function, including captured inputs.
f: The function to execute; an instance of `_DefinedFunction` or
`_EagerDefinedFunction`.
    tout: a list containing the output dtype enums; if `None`, inferred from
      the signature of `f`.
executing_eagerly: (Optional) A boolean indicating whether the context is
executing eagerly. If `None`, fetched from the global context.
config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`,
all optimizations are disabled. Currently only handled for eager defined
functions.
executor_type: (Optional) A string for the name of the executor to be used
in the function call. If not set, or set to an empty string, the default
tensorflow executor will be used.
Returns:
The list of `Tensor`s returned by invoking `f(args)`. If the function does
not return anything, then returns `None` if eager execution is enabled, or
the `Operation` if not.
"""
if tout is None:
tout = tuple(x.type for x in f.definition.signature.output_arg)
if executing_eagerly is None:
executing_eagerly = context.executing_eagerly()
if config is None:
config = function_utils.get_disabled_rewriter_config()
if executor_type is None:
executor_type = ""
if executing_eagerly:
if f.stateful_ops:
outputs = gen_functional_ops.stateful_partitioned_call(
args=args,
Tout=tout,
f=f,
config_proto=config,
executor_type=executor_type)
else:
outputs = gen_functional_ops.partitioned_call(
args=args,
Tout=tout,
f=f,
config_proto=config,
executor_type=executor_type)
return outputs if outputs else None
# The generated binding returns an empty list for functions that don't
# return any Tensors, hence the need to use `create_op` directly.
args = [ops.convert_to_tensor(x) for x in args]
tin_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
type=[x.dtype.as_datatype_enum for x in args]))
tout_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=tout))
func_attr = attr_value_pb2.AttrValue(
func=attr_value_pb2.NameAttrList(name=f.name))
executor_type_attr = attr_value_pb2.AttrValue(
s=compat.as_bytes(executor_type))
# When running in graph mode, the graph and function graphs are optimized
# (i.e. run through grappler) per the session options, so we can disable any
# eager-specific rewriting.
config_proto = attr_value_pb2.AttrValue(s=config)
graph = ops.get_default_graph()
f.add_to_graph(graph)
op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
# Propagate the attribute indicating the need to compile from function to the
# call itself.
xla_compile_attr = "_XlaMustCompile"
op_attrs = {
"Tin": tin_attr,
"Tout": tout_attr,
"f": func_attr,
"config_proto": config_proto,
"executor_type": executor_type_attr,
}
if xla_compile_attr in f.definition.attr:
op_attrs[xla_compile_attr] = f.definition.attr[xla_compile_attr]
op = graph.create_op(op_name, args, tout, name=op_name, attrs=op_attrs)
outputs = op.outputs
if hasattr(f, "graph"):
_set_read_only_resource_inputs_attr(op, f.graph)
if hasattr(f.graph, "collective_manager_ids_used"):
ops.set_int_list_attr(op, acd.COLLECTIVE_MANAGER_IDS,
f.graph.collective_manager_ids_used)
return outputs if outputs else op
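# Illustrative sketch (editor's addition): invoking a `Defun`-defined
# function through `partitioned_call`; `tout` is inferred from the
# function's signature. The names below are hypothetical.
def _example_partitioned_call():
  @function.Defun(dtypes.float32)
  def AddOne(x):
    return x + 1.0

  # Returns the list of output tensors, here [42.0].
  return partitioned_call([constant_op.constant(41.0)], AddOne)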
def _set_read_only_resource_inputs_attr(op, func_graph):
"""Sets the list of resource inputs which are read-only.
This is used by AutomaticControlDependencies.
Args:
op: PartitionedCall Operation.
func_graph: FuncGraph.
"""
read_only_indices = acd.get_read_only_resource_input_indices_graph(func_graph)
ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR,
read_only_indices)
| {
"content_hash": "e6a84f93cab2f4a4499bb97bf782b44d",
"timestamp": "",
"source": "github",
"line_count": 1214,
"max_line_length": 91,
"avg_line_length": 38.766062602965405,
"alnum_prop": 0.6687773575283669,
"repo_name": "aldian/tensorflow",
"id": "6e285d6681d1574eb8098c121d2e1e360d3b8034",
"size": "47750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/functional_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
import concurrent
import time
import testtools
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
import openstack
import openstack.cloud
from openstack.cloud import meta
from openstack import exceptions
from openstack.tests import fakes
from openstack.tests.unit import base
from openstack.tests.unit.cloud import test_port
# Mock out the gettext function so that the task schema can be copy-pasted
def _(msg):
return msg
_TASK_PROPERTIES = {
"id": {
"description": _("An identifier for the task"),
"pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
"type": "string"
},
"type": {
"description": _("The type of task represented by this content"),
"enum": [
"import",
],
"type": "string"
},
"status": {
"description": _("The current status of this task"),
"enum": [
"pending",
"processing",
"success",
"failure"
],
"type": "string"
},
"input": {
"description": _("The parameters required by task, JSON blob"),
"type": ["null", "object"],
},
"result": {
"description": _("The result of current task, JSON blob"),
"type": ["null", "object"],
},
"owner": {
"description": _("An identifier for the owner of this task"),
"type": "string"
},
"message": {
"description": _("Human-readable informative message only included"
" when appropriate (usually on failure)"),
"type": "string",
},
"expires_at": {
"description": _("Datetime when this resource would be"
" subject to removal"),
"type": ["null", "string"]
},
"created_at": {
"description": _("Datetime when this resource was created"),
"type": "string"
},
"updated_at": {
"description": _("Datetime when this resource was updated"),
"type": "string"
},
'self': {'type': 'string'},
'schema': {'type': 'string'}
}
_TASK_SCHEMA = dict(
name='Task', properties=_TASK_PROPERTIES,
additionalProperties=False,
)
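# Illustrative sketch (editor's addition): the schema above mirrors the
# Glance task schema; a task payload could be checked against it with the
# third-party `jsonschema` library. The import and the 'type' key added
# here are assumptions of this sketch and are not used by the tests below.
def _example_validate_task(task):
    import jsonschema  # assumed available for this sketch only
    jsonschema.validate(task, dict(type='object', **_TASK_SCHEMA))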
class TestMemoryCache(base.TestCase):
def setUp(self):
super(TestMemoryCache, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
def _image_dict(self, fake_image):
return self.cloud._normalize_image(meta.obj_to_munch(fake_image))
def _munch_images(self, fake_image):
return self.cloud._normalize_images([fake_image])
def test_openstack_cloud(self):
self.assertIsInstance(self.cloud, openstack.connection.Connection)
def test_list_projects_v3(self):
project_one = self._get_project_data()
project_two = self._get_project_data()
project_list = [project_one, project_two]
first_response = {'projects': [project_one.json_response['project']]}
second_response = {'projects': [p.json_response['project']
for p in project_list]}
mock_uri = self.get_mock_url(
service_type='identity', resource='projects',
base_url_append='v3')
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json=first_response),
dict(method='GET', uri=mock_uri, status_code=200,
json=second_response)])
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(first_response['projects'])),
self.cloud.list_projects())
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(first_response['projects'])),
self.cloud.list_projects())
# invalidate the list_projects cache
self.cloud.list_projects.invalidate(self.cloud)
# ensure the new values are now retrieved
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(second_response['projects'])),
self.cloud.list_projects())
self.assert_calls()
def test_list_projects_v2(self):
self.use_keystone_v2()
project_one = self._get_project_data(v3=False)
project_two = self._get_project_data(v3=False)
project_list = [project_one, project_two]
first_response = {'tenants': [project_one.json_response['tenant']]}
second_response = {'tenants': [p.json_response['tenant']
for p in project_list]}
mock_uri = self.get_mock_url(
service_type='identity', interface='admin', resource='tenants')
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json=first_response),
dict(method='GET', uri=mock_uri, status_code=200,
json=second_response)])
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(first_response['tenants'])),
self.cloud.list_projects())
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(first_response['tenants'])),
self.cloud.list_projects())
# invalidate the list_projects cache
self.cloud.list_projects.invalidate(self.cloud)
# ensure the new values are now retrieved
self.assertEqual(
self.cloud._normalize_projects(
meta.obj_list_to_munch(second_response['tenants'])),
self.cloud.list_projects())
self.assert_calls()
def test_list_servers_no_herd(self):
self.cloud._SERVER_AGE = 2
fake_server = fakes.make_fake_server('1234', 'name')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
with concurrent.futures.ThreadPoolExecutor(16) as pool:
for i in range(16):
pool.submit(lambda: self.cloud.list_servers(bare=True))
                # Without a tiny sleep, it's possible for all 16 threads to
                # race into the single initial lock
time.sleep(0.001)
self.assert_calls()
def test_list_volumes(self):
fake_volume = fakes.FakeVolume('volume1', 'available',
'Volume 1 Display Name')
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available',
'Volume 2 Display Name')
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict, fake_volume2_dict]})])
self.assertEqual(
[self.cloud._normalize_volume(fake_volume_dict)],
self.cloud.list_volumes())
# this call should hit the cache
self.assertEqual(
[self.cloud._normalize_volume(fake_volume_dict)],
self.cloud.list_volumes())
self.cloud.list_volumes.invalidate(self.cloud)
self.assertEqual(
[self.cloud._normalize_volume(fake_volume_dict),
self.cloud._normalize_volume(fake_volume2_dict)],
self.cloud.list_volumes())
self.assert_calls()
def test_list_volumes_creating_invalidates(self):
fake_volume = fakes.FakeVolume('volume1', 'creating',
'Volume 1 Display Name')
fake_volume_dict = meta.obj_to_munch(fake_volume)
fake_volume2 = fakes.FakeVolume('volume2', 'available',
'Volume 2 Display Name')
fake_volume2_dict = meta.obj_to_munch(fake_volume2)
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volume_dict, fake_volume2_dict]})])
self.assertEqual(
[self.cloud._normalize_volume(fake_volume_dict)],
self.cloud.list_volumes())
self.assertEqual(
[self.cloud._normalize_volume(fake_volume_dict),
self.cloud._normalize_volume(fake_volume2_dict)],
self.cloud.list_volumes())
self.assert_calls()
def test_create_volume_invalidates(self):
fake_volb4 = meta.obj_to_munch(
fakes.FakeVolume('volume1', 'available', ''))
_id = '12345'
fake_vol_creating = meta.obj_to_munch(
fakes.FakeVolume(_id, 'creating', ''))
fake_vol_avail = meta.obj_to_munch(
fakes.FakeVolume(_id, 'available', ''))
def now_deleting(request, context):
fake_vol_avail['status'] = 'deleting'
self.register_uris([
self.get_cinder_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4]}),
dict(method='POST',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes']),
json={'volume': fake_vol_creating}),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4, fake_vol_creating]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4, fake_vol_avail]}),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4, fake_vol_avail]}),
dict(method='DELETE',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', _id]),
json=now_deleting),
dict(method='GET',
uri=self.get_mock_url(
'volumev2', 'public', append=['volumes', 'detail']),
json={'volumes': [fake_volb4]})])
self.assertEqual(
[self.cloud._normalize_volume(fake_volb4)],
self.cloud.list_volumes())
volume = dict(display_name='junk_vol',
size=1,
display_description='test junk volume')
self.cloud.create_volume(wait=True, timeout=None, **volume)
# If cache was not invalidated, we would not see our own volume here
# because the first volume was available and thus would already be
# cached.
self.assertEqual(
[self.cloud._normalize_volume(fake_volb4),
self.cloud._normalize_volume(fake_vol_avail)],
self.cloud.list_volumes())
self.cloud.delete_volume(_id)
# And now delete and check same thing since list is cached as all
# available
self.assertEqual(
[self.cloud._normalize_volume(fake_volb4)],
self.cloud.list_volumes())
self.assert_calls()
def test_list_users(self):
user_data = self._get_user_data(email='[email protected]')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
service_type='identity',
resource='users',
base_url_append='v3'),
status_code=200,
json={'users': [user_data.json_response['user']]})])
users = self.cloud.list_users()
self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(user_data.email, users[0]['email'])
self.assert_calls()
def test_modify_user_invalidates_cache(self):
self.use_keystone_v2()
user_data = self._get_user_data(email='[email protected]')
new_resp = {'user': user_data.json_response['user'].copy()}
new_resp['user']['email'] = '[email protected]'
new_req = {'user': {'email': new_resp['user']['email']}}
mock_users_url = self.get_mock_url(
service_type='identity',
interface='admin',
resource='users')
mock_user_resource_url = self.get_mock_url(
service_type='identity',
interface='admin',
resource='users',
append=[user_data.user_id])
empty_user_list_resp = {'users': []}
users_list_resp = {'users': [user_data.json_response['user']]}
updated_users_list_resp = {'users': [new_resp['user']]}
# Password is None in the original create below
user_data.json_request['user']['password'] = None
uris_to_mock = [
            # Initial User List is Empty
dict(method='GET', uri=mock_users_url, status_code=200,
json=empty_user_list_resp),
# POST to create the user
# GET to get the user data after POST
dict(method='POST', uri=mock_users_url, status_code=200,
json=user_data.json_response,
validate=dict(json=user_data.json_request)),
# List Users Call
dict(method='GET', uri=mock_users_url, status_code=200,
json=users_list_resp),
# List users to get ID for update
# Get user using user_id from list
# Update user
# Get updated user
dict(method='GET', uri=mock_users_url, status_code=200,
json=users_list_resp),
dict(method='PUT', uri=mock_user_resource_url, status_code=200,
json=new_resp, validate=dict(json=new_req)),
# List Users Call
dict(method='GET', uri=mock_users_url, status_code=200,
json=updated_users_list_resp),
# List User to get ID for delete
# Get user using user_id from list
# delete user
dict(method='GET', uri=mock_users_url, status_code=200,
json=updated_users_list_resp),
dict(method='GET', uri=mock_user_resource_url, status_code=200,
json=new_resp),
dict(method='DELETE', uri=mock_user_resource_url, status_code=204),
# List Users Call (empty post delete)
dict(method='GET', uri=mock_users_url, status_code=200,
json=empty_user_list_resp)
]
self.register_uris(uris_to_mock)
# first cache an empty list
self.assertEqual([], self.cloud.list_users())
# now add one
created = self.cloud.create_user(name=user_data.name,
email=user_data.email)
self.assertEqual(user_data.user_id, created['id'])
self.assertEqual(user_data.name, created['name'])
self.assertEqual(user_data.email, created['email'])
# Cache should have been invalidated
users = self.cloud.list_users()
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(user_data.email, users[0]['email'])
# Update and check to see if it is updated
updated = self.cloud.update_user(user_data.user_id,
email=new_resp['user']['email'])
self.assertEqual(user_data.user_id, updated.id)
self.assertEqual(user_data.name, updated.name)
self.assertEqual(new_resp['user']['email'], updated.email)
users = self.cloud.list_users()
self.assertEqual(1, len(users))
self.assertEqual(user_data.user_id, users[0]['id'])
self.assertEqual(user_data.name, users[0]['name'])
self.assertEqual(new_resp['user']['email'], users[0]['email'])
# Now delete and ensure it disappears
self.cloud.delete_user(user_data.user_id)
self.assertEqual([], self.cloud.list_users())
self.assert_calls()
def test_list_flavors(self):
mock_uri = '{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT)
uris_to_mock = [
dict(method='GET', uri=mock_uri, json={'flavors': []}),
dict(method='GET', uri=mock_uri,
json={'flavors': fakes.FAKE_FLAVOR_LIST})
]
self.register_uris(uris_to_mock)
self.assertEqual([], self.cloud.list_flavors())
self.assertEqual([], self.cloud.list_flavors())
fake_flavor_dicts = self.cloud._normalize_flavors(
fakes.FAKE_FLAVOR_LIST)
self.cloud.list_flavors.invalidate(self.cloud)
self.assertEqual(fake_flavor_dicts, self.cloud.list_flavors())
self.assert_calls()
def test_list_images(self):
self.use_glance()
fake_image = fakes.make_fake_image(image_id='42')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url('image', 'public',
append=['v2', 'images']),
json={'images': []}),
dict(method='GET',
uri=self.get_mock_url('image', 'public',
append=['v2', 'images']),
json={'images': [fake_image]}),
])
self.assertEqual([], self.cloud.list_images())
self.assertEqual([], self.cloud.list_images())
self.cloud.list_images.invalidate(self.cloud)
self.assertEqual(
self._munch_images(fake_image), self.cloud.list_images())
self.assert_calls()
def test_list_images_caches_deleted_status(self):
self.use_glance()
deleted_image_id = self.getUniqueString()
deleted_image = fakes.make_fake_image(
image_id=deleted_image_id, status='deleted')
active_image_id = self.getUniqueString()
active_image = fakes.make_fake_image(image_id=active_image_id)
list_return = {'images': [active_image, deleted_image]}
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=list_return),
])
self.assertEqual(
[self.cloud._normalize_image(active_image)],
self.cloud.list_images())
self.assertEqual(
[self.cloud._normalize_image(active_image)],
self.cloud.list_images())
# We should only have one call
self.assert_calls()
def test_cache_no_cloud_name(self):
self.use_glance()
self.cloud.name = None
fi = fakes.make_fake_image(image_id=self.getUniqueString())
fi2 = fakes.make_fake_image(image_id=self.getUniqueString())
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi]}),
dict(method='GET',
uri='https://image.example.com/v2/images',
json={'images': [fi, fi2]}),
])
self.assertEqual(
self._munch_images(fi),
self.cloud.list_images())
# Now test that the list was cached
self.assertEqual(
self._munch_images(fi),
self.cloud.list_images())
# Invalidation too
self.cloud.list_images.invalidate(self.cloud)
self.assertEqual(
[
self.cloud._normalize_image(fi),
self.cloud._normalize_image(fi2)
],
self.cloud.list_images())
def test_list_ports_filtered(self):
down_port = test_port.TestPort.mock_neutron_port_create_rep['port']
active_port = down_port.copy()
active_port['status'] = 'ACTIVE'
# We're testing to make sure a query string isn't passed when we're
# caching, but that the results are still filtered.
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports.json']),
json={'ports': [
down_port,
active_port,
]}),
])
ports = self.cloud.list_ports(filters={'status': 'DOWN'})
self.assertCountEqual([down_port], ports)
self.assert_calls()
class TestCacheIgnoresQueuedStatus(base.TestCase):
scenarios = [
('queued', dict(status='queued')),
('saving', dict(status='saving')),
('pending_delete', dict(status='pending_delete')),
]
def setUp(self):
super(TestCacheIgnoresQueuedStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status)
self.active_list_return = {'images': [self.active_image]}
steady_image_id = self.getUniqueString()
self.steady_image = fakes.make_fake_image(image_id=steady_image_id)
self.steady_list_return = {
'images': [self.active_image, self.steady_image]}
def test_list_images_ignores_pending_status(self):
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return),
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.steady_list_return),
])
self.assertEqual(
[self.cloud._normalize_image(self.active_image)],
self.cloud.list_images())
        # The first listing contained a pending-status image, so it was not
        # cached; this second call hits the API again and includes
        # steady_image.
self.assertEqual(
[
self.cloud._normalize_image(self.active_image),
self.cloud._normalize_image(self.steady_image)
],
self.cloud.list_images())
class TestCacheSteadyStatus(base.TestCase):
scenarios = [
('active', dict(status='active')),
('killed', dict(status='killed')),
]
def setUp(self):
super(TestCacheSteadyStatus, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
self.use_glance()
active_image_id = self.getUniqueString()
self.active_image = fakes.make_fake_image(
image_id=active_image_id, status=self.status)
self.active_list_return = {'images': [self.active_image]}
def test_list_images_caches_steady_status(self):
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=self.active_list_return),
])
self.assertEqual(
[self.cloud._normalize_image(self.active_image)],
self.cloud.list_images())
self.assertEqual(
[self.cloud._normalize_image(self.active_image)],
self.cloud.list_images())
# We should only have one call
self.assert_calls()
class TestBogusAuth(base.TestCase):
def setUp(self):
super(TestBogusAuth, self).setUp(
cloud_config_fixture='clouds_cache.yaml')
def test_get_auth_bogus(self):
with testtools.ExpectedException(exceptions.ConfigException):
openstack.connect(
cloud='_bogus_test_', config=self.config)
| {
"content_hash": "ab4afcc9ddf254e38fa683a8370db89f",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 79,
"avg_line_length": 38.3046875,
"alnum_prop": 0.5510095859677748,
"repo_name": "stackforge/python-openstacksdk",
"id": "66345e27badc62e6ef0852694ac4a7d444797bad",
"size": "25060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/cloud/test_caching.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1138292"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
} |
from elasticsearch_parse import Search, Q
# Build a search: term filter on blog, author must match, and a negated
# match (face must not equal "good").
s = Search(index="my-index") \
    .filter("term", blog="xiaorui.cc") \
    .query("match", author="ruifengyun") \
    .query(~Q("match", face="good"))
# Bucket the matching documents by tag.
s.aggs.bucket('per_tag', 'terms', field='tags')
print(s.execute())
| {
"content_hash": "af59dd1d296db4c26cda1d877ffee9fc",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 47,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.6240601503759399,
"repo_name": "rfyiamcool/elasticsearch_parse",
"id": "5baae9341ed4a061f1f22d89e532d9ea7ef14ae5",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62304"
}
],
"symlink_target": ""
} |
import nomad
import pytest
import tests.common as common
@pytest.fixture
def nomad_setup():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN)
return n
@pytest.fixture
def nomad_setup_with_namespace():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN, namespace=common.NOMAD_NAMESPACE)
return n
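# A minimal sketch of a test consuming the fixture above; it is not part of
# the original suite and relies only on names defined in this file.
def test_fixture_returns_client(nomad_setup):
    assert isinstance(nomad_setup, nomad.Nomad)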
| {
"content_hash": "df877437fb4bd082a14a98fdd735e134",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 133,
"avg_line_length": 31.076923076923077,
"alnum_prop": 0.7524752475247525,
"repo_name": "jrxFive/python-nomad",
"id": "a871f3b3a0eecc12db5a0ea5d6fa5d47312556cf",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173098"
}
],
"symlink_target": ""
} |
"""Capturing a single image from webcam
In Linux there are the following methods:
METHOD 1: RTSP protocol
avconv -i rtsp://<user>:<pass>@<local_ip>:<port>/video.mjpg -vframes 1 -r 1 -s 640x480 image.jpg
METHOD 2: HTTP protocol
avconv -i http://<user>:<pass>@<local_ip>:<port>/video.mjpg -vframes 1 -r 1 -s 640x480 image.jpg
METHOD 3: If the camera is smart enough, it is possible to send an http request to take a snapshot
wget --tries=2 --timeout=10 http://<user>:<pass>@<local_ip>:<port>/cgi-bin/jpg/image -O snapshot.jpg
See also: Link: http://stackoverflow.com/a/11094891
"""
from cv2 import VideoCapture, imwrite
from os import remove, stat
from os.path import isfile
import requests
def imageCaptureFromIP(cameraUrl, username, password, imageFileName):
# See: http://stackoverflow.com/a/13137873
try:
r = requests.get(cameraUrl, auth=(username, password), timeout=10, stream=True)
except Exception:
# TODO: better to handle exceptions as in:
# http://docs.python-requests.org/en/latest/user/quickstart/#errors-and-exceptions
return False
if r.status_code != 200:
return False
with open(imageFileName, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
if not isfile(imageFileName):
return False
statinfo = stat(imageFileName)
if statinfo.st_size == 0:
remove(imageFileName)
return False
return True
def imageCaptureFromUSB(cameraNumber, imageFileName):
# initialize the camera
cam = VideoCapture(cameraNumber)
s, img = cam.read()
if not s:
        # frame capture failed
        return False
    imwrite(imageFileName, img)  # save JPG image
return True
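# A minimal sketch of METHOD 1 (RTSP) from the module docstring. It is not
# part of the original module: it assumes the avconv binary is on PATH and
# simply shells out with the flags documented above.
def imageCaptureFromRTSP(cameraUrl, imageFileName):
    from subprocess import call
    status = call(['avconv', '-y', '-i', cameraUrl, '-vframes', '1',
                   '-r', '1', '-s', '640x480', imageFileName])
    return status == 0 and isfile(imageFileName)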
def imageCapture(cameraDesc, imageFileName):
camProtAndAddr = cameraDesc['source'].split('://')
if camProtAndAddr[0] == 'usb':
s = imageCaptureFromUSB(eval(camProtAndAddr[1]), imageFileName)
elif camProtAndAddr[0] == 'http':
s = imageCaptureFromIP(cameraDesc['source'],
cameraDesc['optional-auth']['user-name'],
cameraDesc['optional-auth']['password'],
imageFileName)
else:
s = False
return s
if __name__ == "__main__":
from camshotcfg import ConfigDataLoad
from datetime import datetime
    from os import makedirs, path
    MAIN_SCRIPT_NAME = path.basename(__file__)  # script name used in messages
cfg = ConfigDataLoad('camshotcfg.json')
# Make the grabbed picture file path
now = datetime.now()
picturesDirName = '{0:s}/CAMSHOT_{1:%Y%m%d}'.format(cfg.data['camshot-datastore'], now)
try:
makedirs(picturesDirName)
    except OSError as e:
if not path.isdir(picturesDirName):
# If the directory doesn't already exist, there was an error on creation
print "{0}: create directory {1} [OS errno {2}]: {3}".format(MAIN_SCRIPT_NAME, picturesDirName, e.errno, e.strerror)
cameraIndex = 0
for camera in cfg.data['cameras-list']:
        print('Get image from', camera['source'])
pictureFileFullName = '{0:s}/CS{1:%Y%m%d%H%M}_{2:02d}.jpg'.format(picturesDirName, now, cameraIndex)
        print('Save in', pictureFileFullName)
s = imageCapture(camera, pictureFileFullName)
if not s:
            print('...Fail')
        cameraIndex += 1
| {
"content_hash": "6586d6890fcf41ec58cb68dbd714e051",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 128,
"avg_line_length": 34.364583333333336,
"alnum_prop": 0.6489845407699303,
"repo_name": "corerd/camshot",
"id": "355f2169dd1774e8bf533094039ea150edd6551b",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camgrab.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144204"
}
],
"symlink_target": ""
} |
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import VergeTestFramework
class UptimeTest(VergeTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
        assert self.nodes[0].uptime() >= wait_time
if __name__ == '__main__':
UptimeTest().main()
| {
"content_hash": "3015d4ed227ec0c9f988162e9ff28c7b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 22.53846153846154,
"alnum_prop": 0.6416382252559727,
"repo_name": "vergecurrency/VERGE",
"id": "adda324e2854f106cbb49100e6e7bd9f4bb54442",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/rpc_uptime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "4146809"
},
{
"name": "C++",
"bytes": "6598251"
},
{
"name": "CMake",
"bytes": "58"
},
{
"name": "Dockerfile",
"bytes": "7558"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30330"
},
{
"name": "M4",
"bytes": "214935"
},
{
"name": "Makefile",
"bytes": "106252"
},
{
"name": "Objective-C",
"bytes": "4891"
},
{
"name": "Objective-C++",
"bytes": "6640"
},
{
"name": "Python",
"bytes": "1404272"
},
{
"name": "QMake",
"bytes": "754"
},
{
"name": "Shell",
"bytes": "102969"
}
],
"symlink_target": ""
} |
import logging
bm_logger = None
def setup_logging(logfile):
"""\brief Sets up a simple logger and exposes it globally
\param logfile (\c string) The path to the file to log to
"""
global bm_logger
bm_logger = logging.getLogger()
hdlr = logging.FileHandler(logfile)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
bm_logger.addHandler(hdlr)
bm_logger.setLevel(logging.INFO)
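if __name__ == '__main__':
    # Minimal usage sketch with a hypothetical log path; not part of the
    # original module.
    setup_logging('/tmp/blockmon.log')
    bm_logger.info('logging configured')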
| {
"content_hash": "dc21c9c84e696fd433e169e21ad4b8de",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 31.133333333333333,
"alnum_prop": 0.69593147751606,
"repo_name": "cnplab/blockmon",
"id": "f0bee594c5b009e01ebd5e6f5f40e76f531979ce",
"size": "2283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/core/bmlogging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "136416"
},
{
"name": "C++",
"bytes": "1147895"
},
{
"name": "CMake",
"bytes": "6197"
},
{
"name": "Makefile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "156101"
},
{
"name": "VHDL",
"bytes": "1593300"
}
],
"symlink_target": ""
} |
"""Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import math
from copy import deepcopy
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k', exci='k', ias='k', syst='k'),
units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
eog=150e-6, ecg=5e-4, emg=1e-3,
ref_meg=1e-12, misc=1e-3,
stim=1, resp=1, chpi=1e-4, exci=1,
ias=1, syst=1),
ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
eeg=(-200., 200.), misc=(-5., 5.)),
titles=dict(eeg='EEG', grad='Gradiometers',
mag='Magnetometers', misc='misc'),
mask_params=dict(marker='o',
markerfacecolor='w',
markeredgecolor='k',
linewidth=0,
markeredgewidth=1,
markersize=4))
def _mutable_defaults(*mappings):
""" To avoid dicts as default keyword arguments
Use this function instead to resolve default dict values.
Example usage:
    scalings, units = _mutable_defaults(('scalings', scalings),
                                        ('units', units))
"""
out = []
for k, v in mappings:
this_mapping = DEFAULTS[k]
if v is not None:
this_mapping = deepcopy(DEFAULTS[k])
this_mapping.update(v)
out += [this_mapping]
return out
def _setup_vmin_vmax(data, vmin, vmax):
"""Aux function to handle vmin and vamx parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
        elif vmax is None:
vmax = np.max(data)
return vmin, vmax
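# Worked example: for data = np.array([-3., 1.]) with vmin=vmax=None this
# returns (-3.0, 3.0), i.e. a range symmetric around zero.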
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.canvas.draw()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
    except Exception:
        msg = ('Matplotlib function \'tight_layout\'%s.'
               ' Skipping subplot adjustment.')
if not hasattr(plt, 'tight_layout'):
case = ' is not available'
else:
case = (' is not supported by your backend: `%s`'
% plt.get_backend())
warn(msg % case)
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all([p['active'] for p in container.info['projs']]):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3
Bounds for the colormap.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
    Note that this will return a colormap that will display correctly for
    data that are scaled by the plotting function to span [-fmax, fmax].
Examples
--------
The following code will plot a STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
l = np.asarray(limits, dtype='float')
if len(l) != 3:
raise ValueError('limits must have 3 elements')
if any(l < 0):
raise ValueError('limits must all be positive')
if any(np.diff(l) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
cdict = {'red': ((l[0], 0.0, 0.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 1.0, 1.0),
(l[5], 1.0, 1.0)),
'green': ((l[0], 1.0, 1.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 1.0, 1.0)),
'blue': ((l[0], 1.0, 1.0),
(l[1], 1.0, 1.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_opts'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_opts'])
del params['proj_checks']
params['fig_opts'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
    if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
import matplotlib as mpl
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
ax_temp = plt.axes((0, 0, 1, 1))
ax_temp.get_yaxis().set_visible(False)
ax_temp.get_xaxis().set_visible(False)
fig_proj.add_axes(ax_temp)
proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show()
except Exception:
pass
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
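    Examples
    --------
    A minimal sketch with hypothetical file names:
    fname_out = compare_fiff('raw_a.fif', 'raw_b.fif', show=False)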
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'w')
else:
f = tempfile.NamedTemporaryFile('w', delete=False)
fname_out = f.name
with f as fid:
fid.write(diff)
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
import matplotlib.pyplot as plt
import matplotlib as mpl
old_val = mpl.rcParams['toolbar']
try:
mpl.rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
finally:
mpl.rcParams['toolbar'] = old_val
return fig
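# Minimal usage sketch (the figure size is just an example):
#     fig = figure_nobar(figsize=(8, 6))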
| {
"content_hash": "4a77a27f91035b87c7d9be3a09949b43",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 35.244505494505496,
"alnum_prop": 0.5478992906695768,
"repo_name": "jaeilepp/eggie",
"id": "88c5e5b5f4f252969e4c1d9b809b02277b79e9b0",
"size": "12829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/viz/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3357472"
}
],
"symlink_target": ""
} |