body (string, lengths 26–98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1–16.8k) | path (string, lengths 5–230) | name (string, lengths 1–96) | repository_name (string, lengths 7–89) | lang (stringclasses, 1 value) | body_without_docstring (string, lengths 20–98.2k)
---|---|---|---|---|---|---|---
def test_unicode_doctest(self, testdir):
'\n Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii\n characters.\n '
p = testdir.maketxtfile(test_unicode_doctest='\n .. doctest::\n\n >>> print(\n ... "Hi\\n\\nByé")\n Hi\n ...\n Byé\n >>> 1/0 # Byé\n 1\n ')
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(['*UNEXPECTED EXCEPTION: ZeroDivisionError*', '*1 failed*']) | -2,616,174,084,683,177,500 | Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters. | testing/test_doctest.py | test_unicode_doctest | NNRepos/pytest | python | def test_unicode_doctest(self, testdir):
'\n Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii\n characters.\n '
p = testdir.maketxtfile(test_unicode_doctest='\n .. doctest::\n\n >>> print(\n ... "Hi\\n\\nByé")\n Hi\n ...\n Byé\n >>> 1/0 # Byé\n 1\n ')
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(['*UNEXPECTED EXCEPTION: ZeroDivisionError*', '*1 failed*']) |
def test_unicode_doctest_module(self, testdir):
'\n Test case for issue 2434: DecodeError on Python 2 when doctest docstring\n contains non-ascii characters.\n '
p = testdir.makepyfile(test_unicode_doctest_module='\n def fix_bad_unicode(text):\n \'\'\'\n >>> print(fix_bad_unicode(\'único\'))\n único\n \'\'\'\n return "único"\n ')
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['* 1 passed *']) | 1,344,466,552,277,072,100 | Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters. | testing/test_doctest.py | test_unicode_doctest_module | NNRepos/pytest | python | def test_unicode_doctest_module(self, testdir):
'\n Test case for issue 2434: DecodeError on Python 2 when doctest docstring\n contains non-ascii characters.\n '
p = testdir.makepyfile(test_unicode_doctest_module='\n def fix_bad_unicode(text):\n \'\'\'\n >>> print(fix_bad_unicode(\'único\'))\n único\n \'\'\'\n return "único"\n ')
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['* 1 passed *']) |
def test_print_unicode_value(self, testdir):
"\n Test case for issue 3583: Printing Unicode in doctest under Python 2.7\n doesn't work\n "
p = testdir.maketxtfile(test_print_unicode_value="\n Here is a doctest::\n\n >>> print('\\xE5\\xE9\\xEE\\xF8\\xFC')\n åéîøü\n ")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(['* 1 passed *']) | 958,529,165,784,957,400 | Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work | testing/test_doctest.py | test_print_unicode_value | NNRepos/pytest | python | def test_print_unicode_value(self, testdir):
"\n Test case for issue 3583: Printing Unicode in doctest under Python 2.7\n doesn't work\n "
p = testdir.maketxtfile(test_print_unicode_value="\n Here is a doctest::\n\n >>> print('\\xE5\\xE9\\xEE\\xF8\\xFC')\n åéîøü\n ")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(['* 1 passed *']) |
def test_reportinfo(self, testdir):
'\n Test case to make sure that DoctestItem.reportinfo() returns lineno.\n '
p = testdir.makepyfile(test_reportinfo="\n def foo(x):\n '''\n >>> foo('a')\n 'b'\n '''\n return 'c'\n ")
(items, reprec) = testdir.inline_genitems(p, '--doctest-modules')
reportinfo = items[0].reportinfo()
assert (reportinfo[1] == 1) | 4,596,726,916,877,206,500 | Test case to make sure that DoctestItem.reportinfo() returns lineno. | testing/test_doctest.py | test_reportinfo | NNRepos/pytest | python | def test_reportinfo(self, testdir):
p = testdir.makepyfile(test_reportinfo="\n def foo(x):\n '''\n >>> foo('a')\n 'b'\n '''\n return 'c'\n ")
(items, reprec) = testdir.inline_genitems(p, '--doctest-modules')
reportinfo = items[0].reportinfo()
assert (reportinfo[1] == 1) |
def test_valid_setup_py(self, testdir):
'\n Test to make sure that pytest ignores valid setup.py files when run\n with --doctest-modules\n '
p = testdir.makepyfile(setup="\n from setuptools import setup, find_packages\n setup(name='sample',\n version='0.0',\n description='description',\n packages=find_packages()\n )\n ")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['*collected 0 items*']) | 2,163,101,895,664,856,000 | Test to make sure that pytest ignores valid setup.py files when run
with --doctest-modules | testing/test_doctest.py | test_valid_setup_py | NNRepos/pytest | python | def test_valid_setup_py(self, testdir):
'\n Test to make sure that pytest ignores valid setup.py files when run\n with --doctest-modules\n '
p = testdir.makepyfile(setup="\n from setuptools import setup, find_packages\n setup(name='sample',\n version='0.0',\n description='description',\n packages=find_packages()\n )\n ")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['*collected 0 items*']) |
def test_invalid_setup_py(self, testdir):
'\n Test to make sure that pytest reads setup.py files that are not used\n for python packages when run with --doctest-modules\n '
p = testdir.makepyfile(setup="\n def test_foo():\n return 'bar'\n ")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['*collected 1 item*']) | 410,459,342,461,927,800 | Test to make sure that pytest reads setup.py files that are not used
for python packages when run with --doctest-modules | testing/test_doctest.py | test_invalid_setup_py | NNRepos/pytest | python | def test_invalid_setup_py(self, testdir):
'\n Test to make sure that pytest reads setup.py files that are not used\n for python packages when run with --doctest-modules\n '
p = testdir.makepyfile(setup="\n def test_foo():\n return 'bar'\n ")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['*collected 1 item*']) |
@pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_allow_unicode(self, testdir, config_mode):
'Test that doctests which output unicode work in all python versions\n tested by pytest when the ALLOW_UNICODE option is used (either in\n the ini file or by an inline comment).\n '
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = ALLOW_UNICODE\n ')
comment = ''
else:
comment = '#doctest: +ALLOW_UNICODE'
testdir.maketxtfile(test_doc="\n >>> b'12'.decode('ascii') {comment}\n '12'\n ".format(comment=comment))
testdir.makepyfile(foo="\n def foo():\n '''\n >>> b'12'.decode('ascii') {comment}\n '12'\n '''\n ".format(comment=comment))
reprec = testdir.inline_run('--doctest-modules')
reprec.assertoutcome(passed=2) | 7,622,682,363,274,835,000 | Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment). | testing/test_doctest.py | test_allow_unicode | NNRepos/pytest | python | @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_allow_unicode(self, testdir, config_mode):
'Test that doctests which output unicode work in all python versions\n tested by pytest when the ALLOW_UNICODE option is used (either in\n the ini file or by an inline comment).\n '
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = ALLOW_UNICODE\n ')
comment = ''
else:
comment = '#doctest: +ALLOW_UNICODE'
testdir.maketxtfile(test_doc="\n >>> b'12'.decode('ascii') {comment}\n '12'\n ".format(comment=comment))
testdir.makepyfile(foo="\n def foo():\n '''\n >>> b'12'.decode('ascii') {comment}\n '12'\n '''\n ".format(comment=comment))
reprec = testdir.inline_run('--doctest-modules')
reprec.assertoutcome(passed=2) |
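For reference, a minimal standalone sketch of the ALLOW_UNICODE doctest option exercised by the row above (module and function names are illustrative, not taken from the dataset):

```python
# sketch_allow_unicode.py -- run with `pytest --doctest-modules`.
# ALLOW_UNICODE lets an expected output written as u'12' also match '12',
# which made the same doctest pass on both Python 2 and Python 3.
def decode_example():
    """
    >>> b'12'.decode('ascii')  # doctest: +ALLOW_UNICODE
    '12'
    """
```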
@pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_allow_bytes(self, testdir, config_mode):
'Test that doctests which output bytes work in all python versions\n tested by pytest when the ALLOW_BYTES option is used (either in\n the ini file or by an inline comment) (#1287).\n '
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = ALLOW_BYTES\n ')
comment = ''
else:
comment = '#doctest: +ALLOW_BYTES'
testdir.maketxtfile(test_doc="\n >>> b'foo' {comment}\n 'foo'\n ".format(comment=comment))
testdir.makepyfile(foo="\n def foo():\n '''\n >>> b'foo' {comment}\n 'foo'\n '''\n ".format(comment=comment))
reprec = testdir.inline_run('--doctest-modules')
reprec.assertoutcome(passed=2) | 8,425,206,529,641,055,000 | Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment) (#1287). | testing/test_doctest.py | test_allow_bytes | NNRepos/pytest | python | @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_allow_bytes(self, testdir, config_mode):
'Test that doctests which output bytes work in all python versions\n tested by pytest when the ALLOW_BYTES option is used (either in\n the ini file or by an inline comment) (#1287).\n '
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = ALLOW_BYTES\n ')
comment = ''
else:
comment = '#doctest: +ALLOW_BYTES'
testdir.maketxtfile(test_doc="\n >>> b'foo' {comment}\n 'foo'\n ".format(comment=comment))
testdir.makepyfile(foo="\n def foo():\n '''\n >>> b'foo' {comment}\n 'foo'\n '''\n ".format(comment=comment))
reprec = testdir.inline_run('--doctest-modules')
reprec.assertoutcome(passed=2) |
def test_unicode_string(self, testdir):
'Test that doctests which output unicode fail in Python 2 when\n the ALLOW_UNICODE option is not used. The same test should pass\n in Python 3.\n '
testdir.maketxtfile(test_doc="\n >>> b'12'.decode('ascii')\n '12'\n ")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1) | -3,410,570,945,633,879,000 | Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3. | testing/test_doctest.py | test_unicode_string | NNRepos/pytest | python | def test_unicode_string(self, testdir):
'Test that doctests which output unicode fail in Python 2 when\n the ALLOW_UNICODE option is not used. The same test should pass\n in Python 3.\n '
testdir.maketxtfile(test_doc="\n >>> b'12'.decode('ascii')\n '12'\n ")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1) |
def test_bytes_literal(self, testdir):
'Test that doctests which output bytes fail in Python 3 when\n the ALLOW_BYTES option is not used. (#1287).\n '
testdir.maketxtfile(test_doc="\n >>> b'foo'\n 'foo'\n ")
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1) | 4,550,843,990,118,094,000 | Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287). | testing/test_doctest.py | test_bytes_literal | NNRepos/pytest | python | def test_bytes_literal(self, testdir):
'Test that doctests which output bytes fail in Python 3 when\n the ALLOW_BYTES option is not used. (#1287).\n '
testdir.maketxtfile(test_doc="\n >>> b'foo'\n 'foo'\n ")
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1) |
@pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_number_precision(self, testdir, config_mode):
'Test the NUMBER option.'
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = NUMBER\n ')
comment = ''
else:
comment = '#doctest: +NUMBER'
testdir.maketxtfile(test_doc="\n\n Scalars:\n\n >>> import math\n >>> math.pi {comment}\n 3.141592653589793\n >>> math.pi {comment}\n 3.1416\n >>> math.pi {comment}\n 3.14\n >>> -math.pi {comment}\n -3.14\n >>> math.pi {comment}\n 3.\n >>> 3. {comment}\n 3.0\n >>> 3. {comment}\n 3.\n >>> 3. {comment}\n 3.01\n >>> 3. {comment}\n 2.99\n >>> .299 {comment}\n .3\n >>> .301 {comment}\n .3\n >>> 951. {comment}\n 1e3\n >>> 1049. {comment}\n 1e3\n >>> -1049. {comment}\n -1e3\n >>> 1e3 {comment}\n 1e3\n >>> 1e3 {comment}\n 1000.\n\n Lists:\n\n >>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}\n [3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]\n >>> [[0.333, 0.667], [0.999, 1.333]] {comment}\n [[0.33, 0.667], [0.999, 1.333]]\n >>> [[[0.101]]] {comment}\n [[[0.1]]]\n\n Doesn't barf on non-numbers:\n\n >>> 'abc' {comment}\n 'abc'\n >>> None {comment}\n ".format(comment=comment))
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1) | -1,293,729,877,081,996,500 | Test the NUMBER option. | testing/test_doctest.py | test_number_precision | NNRepos/pytest | python | @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
def test_number_precision(self, testdir, config_mode):
if (config_mode == 'ini'):
testdir.makeini('\n [pytest]\n doctest_optionflags = NUMBER\n ')
comment = ''
else:
comment = '#doctest: +NUMBER'
testdir.maketxtfile(test_doc="\n\n Scalars:\n\n >>> import math\n >>> math.pi {comment}\n 3.141592653589793\n >>> math.pi {comment}\n 3.1416\n >>> math.pi {comment}\n 3.14\n >>> -math.pi {comment}\n -3.14\n >>> math.pi {comment}\n 3.\n >>> 3. {comment}\n 3.0\n >>> 3. {comment}\n 3.\n >>> 3. {comment}\n 3.01\n >>> 3. {comment}\n 2.99\n >>> .299 {comment}\n .3\n >>> .301 {comment}\n .3\n >>> 951. {comment}\n 1e3\n >>> 1049. {comment}\n 1e3\n >>> -1049. {comment}\n -1e3\n >>> 1e3 {comment}\n 1e3\n >>> 1e3 {comment}\n 1000.\n\n Lists:\n\n >>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}\n [3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]\n >>> [[0.333, 0.667], [0.999, 1.333]] {comment}\n [[0.33, 0.667], [0.999, 1.333]]\n >>> [[[0.101]]] {comment}\n [[[0.1]]]\n\n Doesn't barf on non-numbers:\n\n >>> 'abc' {comment}\n 'abc'\n >>> None {comment}\n ".format(comment=comment))
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1) |
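For reference, a minimal sketch of the NUMBER doctest option (pytest >= 5.1) that the row above exercises; the module name is illustrative:

```python
# sketch_number.py -- run with `pytest --doctest-modules`.
# With +NUMBER, a float in the actual output only has to match the
# expected output to the precision the expected output spells out.
def pi_example():
    """
    >>> import math
    >>> math.pi  # doctest: +NUMBER
    3.14
    """
```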
def test_doctest_module_session_fixture(self, testdir):
'Test that session fixtures are initialized for doctest modules (#768)\n '
testdir.makeconftest("\n import pytest\n import sys\n\n @pytest.yield_fixture(autouse=True, scope='session')\n def myfixture():\n assert not hasattr(sys, 'pytest_session_data')\n sys.pytest_session_data = 1\n yield\n del sys.pytest_session_data\n ")
testdir.makepyfile(foo="\n import sys\n\n def foo():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n\n def bar():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n ")
result = testdir.runpytest('--doctest-modules')
result.stdout.fnmatch_lines(['*2 passed*']) | -7,865,538,692,801,229,000 | Test that session fixtures are initialized for doctest modules (#768) | testing/test_doctest.py | test_doctest_module_session_fixture | NNRepos/pytest | python | def test_doctest_module_session_fixture(self, testdir):
testdir.makeconftest("\n import pytest\n import sys\n\n @pytest.yield_fixture(autouse=True, scope='session')\n def myfixture():\n assert not hasattr(sys, 'pytest_session_data')\n sys.pytest_session_data = 1\n yield\n del sys.pytest_session_data\n ")
testdir.makepyfile(foo="\n import sys\n\n def foo():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n\n def bar():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n ")
result = testdir.runpytest('--doctest-modules')
result.stdout.fnmatch_lines(['*2 passed*']) |
@pytest.mark.parametrize('scope', SCOPES)
@pytest.mark.parametrize('enable_doctest', [True, False])
def test_fixture_scopes(self, testdir, scope, enable_doctest):
'Test that auto-use fixtures work properly with doctest modules.\n See #1057 and #1100.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def auto(request):\n return 99\n '.format(scope=scope))
testdir.makepyfile(test_1='\n def test_foo():\n """\n >>> getfixture(\'auto\') + 1\n 100\n """\n def test_bar():\n assert 1\n ')
params = (('--doctest-modules',) if enable_doctest else ())
passes = (3 if enable_doctest else 2)
result = testdir.runpytest(*params)
result.stdout.fnmatch_lines([('*=== %d passed in *' % passes)]) | -4,703,684,174,382,545,000 | Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100. | testing/test_doctest.py | test_fixture_scopes | NNRepos/pytest | python | @pytest.mark.parametrize('scope', SCOPES)
@pytest.mark.parametrize('enable_doctest', [True, False])
def test_fixture_scopes(self, testdir, scope, enable_doctest):
'Test that auto-use fixtures work properly with doctest modules.\n See #1057 and #1100.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def auto(request):\n return 99\n '.format(scope=scope))
testdir.makepyfile(test_1='\n def test_foo():\n """\n >>> getfixture(\'auto\') + 1\n 100\n """\n def test_bar():\n assert 1\n ')
params = (('--doctest-modules',) if enable_doctest else ())
passes = (3 if enable_doctest else 2)
result = testdir.runpytest(*params)
result.stdout.fnmatch_lines([('*=== %d passed in *' % passes)]) |
@pytest.mark.parametrize('scope', SCOPES)
@pytest.mark.parametrize('autouse', [True, False])
@pytest.mark.parametrize('use_fixture_in_doctest', [True, False])
def test_fixture_module_doctest_scopes(self, testdir, scope, autouse, use_fixture_in_doctest):
'Test that auto-use fixtures work properly with doctest files.\n See #1057 and #1100.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse={autouse}, scope="{scope}")\n def auto(request):\n return 99\n '.format(scope=scope, autouse=autouse))
if use_fixture_in_doctest:
testdir.maketxtfile(test_doc="\n >>> getfixture('auto')\n 99\n ")
else:
testdir.maketxtfile(test_doc='\n >>> 1 + 1\n 2\n ')
result = testdir.runpytest('--doctest-modules')
result.stdout.no_fnmatch_line('*FAILURES*')
result.stdout.fnmatch_lines(['*=== 1 passed in *']) | -3,690,328,013,666,819,600 | Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100. | testing/test_doctest.py | test_fixture_module_doctest_scopes | NNRepos/pytest | python | @pytest.mark.parametrize('scope', SCOPES)
@pytest.mark.parametrize('autouse', [True, False])
@pytest.mark.parametrize('use_fixture_in_doctest', [True, False])
def test_fixture_module_doctest_scopes(self, testdir, scope, autouse, use_fixture_in_doctest):
'Test that auto-use fixtures work properly with doctest files.\n See #1057 and #1100.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse={autouse}, scope="{scope}")\n def auto(request):\n return 99\n '.format(scope=scope, autouse=autouse))
if use_fixture_in_doctest:
testdir.maketxtfile(test_doc="\n >>> getfixture('auto')\n 99\n ")
else:
testdir.maketxtfile(test_doc='\n >>> 1 + 1\n 2\n ')
result = testdir.runpytest('--doctest-modules')
result.stdout.no_fnmatch_line('*FAILURES*')
result.stdout.fnmatch_lines(['*=== 1 passed in *']) |
@pytest.mark.parametrize('scope', SCOPES)
def test_auto_use_request_attributes(self, testdir, scope):
'Check that all attributes of a request in an autouse fixture\n behave as expected when requested for a doctest item.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def auto(request):\n if "{scope}" == \'module\':\n assert request.module is None\n if "{scope}" == \'class\':\n assert request.cls is None\n if "{scope}" == \'function\':\n assert request.function is None\n return 99\n '.format(scope=scope))
testdir.maketxtfile(test_doc='\n >>> 1 + 1\n 2\n ')
result = testdir.runpytest('--doctest-modules')
str(result.stdout.no_fnmatch_line('*FAILURES*'))
result.stdout.fnmatch_lines(['*=== 1 passed in *']) | 4,731,622,570,318,291,000 | Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item. | testing/test_doctest.py | test_auto_use_request_attributes | NNRepos/pytest | python | @pytest.mark.parametrize('scope', SCOPES)
def test_auto_use_request_attributes(self, testdir, scope):
'Check that all attributes of a request in an autouse fixture\n behave as expected when requested for a doctest item.\n '
testdir.makeconftest('\n import pytest\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def auto(request):\n if "{scope}" == \'module\':\n assert request.module is None\n if "{scope}" == \'class\':\n assert request.cls is None\n if "{scope}" == \'function\':\n assert request.function is None\n return 99\n '.format(scope=scope))
testdir.maketxtfile(test_doc='\n >>> 1 + 1\n 2\n ')
result = testdir.runpytest('--doctest-modules')
str(result.stdout.no_fnmatch_line('*FAILURES*'))
result.stdout.fnmatch_lines(['*=== 1 passed in *']) |
@pytest.mark.parametrize('scope', SCOPES)
def test_namespace_doctestfile(self, testdir, scope):
'\n Check that inserting something into the namespace works in a\n simple text file doctest\n '
testdir.makeconftest('\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def add_contextlib(doctest_namespace):\n doctest_namespace[\'cl\'] = contextlib\n '.format(scope=scope))
p = testdir.maketxtfile('\n >>> print(cl.__name__)\n contextlib\n ')
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1) | 925,541,292,342,811,300 | Check that inserting something into the namespace works in a
simple text file doctest | testing/test_doctest.py | test_namespace_doctestfile | NNRepos/pytest | python | @pytest.mark.parametrize('scope', SCOPES)
def test_namespace_doctestfile(self, testdir, scope):
'\n Check that inserting something into the namespace works in a\n simple text file doctest\n '
testdir.makeconftest('\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def add_contextlib(doctest_namespace):\n doctest_namespace[\'cl\'] = contextlib\n '.format(scope=scope))
p = testdir.maketxtfile('\n >>> print(cl.__name__)\n contextlib\n ')
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1) |
@pytest.mark.parametrize('scope', SCOPES)
def test_namespace_pyfile(self, testdir, scope):
'\n Check that inserting something into the namespace works in a\n simple Python file docstring doctest\n '
testdir.makeconftest('\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def add_contextlib(doctest_namespace):\n doctest_namespace[\'cl\'] = contextlib\n '.format(scope=scope))
p = testdir.makepyfile("\n def foo():\n '''\n >>> print(cl.__name__)\n contextlib\n '''\n ")
reprec = testdir.inline_run(p, '--doctest-modules')
reprec.assertoutcome(passed=1) | -5,751,900,679,091,508,000 | Check that inserting something into the namespace works in a
simple Python file docstring doctest | testing/test_doctest.py | test_namespace_pyfile | NNRepos/pytest | python | @pytest.mark.parametrize('scope', SCOPES)
def test_namespace_pyfile(self, testdir, scope):
'\n Check that inserting something into the namespace works in a\n simple Python file docstring doctest\n '
testdir.makeconftest('\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope="{scope}")\n def add_contextlib(doctest_namespace):\n doctest_namespace[\'cl\'] = contextlib\n '.format(scope=scope))
p = testdir.makepyfile("\n def foo():\n '''\n >>> print(cl.__name__)\n contextlib\n '''\n ")
reprec = testdir.inline_run(p, '--doctest-modules')
reprec.assertoutcome(passed=1) |
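For reference, the `doctest_namespace` fixture used in the two rows above can be wired up in a plain conftest.py like this (a sketch mirroring the tests):

```python
# conftest.py -- make `cl` available inside every collected doctest.
import contextlib

import pytest

@pytest.fixture(autouse=True)
def add_contextlib(doctest_namespace):
    doctest_namespace["cl"] = contextlib
```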
def callback(update: Update, _: CallbackContext):
'Print the help text for a /start or /help command'
update.message.reply_text(helper.create_help_text()) | 5,267,084,407,609,486,000 | Print the help text for a /start or /help command | ongabot/handler/helpcommand.py | callback | walkerjens/telegram.ongabot | python | def callback(update: Update, _: CallbackContext):
update.message.reply_text(helper.create_help_text()) |
def pattern_sixteen(steps):
' Pattern sixteen\n\n 9\n 9 8\n 9 8 7\n 9 8 7 6\n 9 8 7 6 5\n 9 8 7 6 5 4\n 9 8 7 6 5 4 3\n 9 8 7 6 5 4 3 2\n 9 8 7 6 5 4 3 2 1\n '
get_range = [str(i) for i in range(1, (steps + 1))][::(- 1)]
for gr in range(1, (len(get_range) + 1)):
join = ' '.join(get_range[:gr])
print(join) | 7,583,793,884,200,287,000 | Pattern sixteen
9
9 8
9 8 7
9 8 7 6
9 8 7 6 5
9 8 7 6 5 4
9 8 7 6 5 4 3
9 8 7 6 5 4 3 2
9 8 7 6 5 4 3 2 1 | Project Pattern/pattern_16.py | pattern_sixteen | chandthash/nppy | python | def pattern_sixteen(steps):
' Pattern sixteen\n\n 9\n 9 8\n 9 8 7\n 9 8 7 6\n 9 8 7 6 5\n 9 8 7 6 5 4\n 9 8 7 6 5 4 3\n 9 8 7 6 5 4 3 2\n 9 8 7 6 5 4 3 2 1\n '
get_range = [str(i) for i in range(1, (steps + 1))][::(- 1)]
for gr in range(1, (len(get_range) + 1)):
join = ' '.join(get_range[:gr])
print(join) |
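A quick usage check of `pattern_sixteen` (expected output shown in comments):

```python
pattern_sixteen(5)
# 5
# 5 4
# 5 4 3
# 5 4 3 2
# 5 4 3 2 1
```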
def setup(hass, config):
'Set up the StatsD component.'
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
sample_rate = conf.get(CONF_RATE)
prefix = conf.get(CONF_PREFIX)
value_mapping = conf.get(CONF_VALUE_MAP)
show_attribute_flag = conf.get(CONF_ATTR)
statsd_client = statsd.StatsClient(host=host, port=port, prefix=prefix)
def statsd_event_listener(event):
'Listen for new messages on the bus and send them to StatsD.'
state = event.data.get('new_state')
if (state is None):
return
try:
if (value_mapping and (state.state in value_mapping)):
_state = float(value_mapping[state.state])
else:
_state = state_helper.state_as_number(state)
except ValueError:
_state = None
states = dict(state.attributes)
_LOGGER.debug('Sending %s', state.entity_id)
if (show_attribute_flag is True):
if isinstance(_state, (float, int)):
statsd_client.gauge(('%s.state' % state.entity_id), _state, sample_rate)
for (key, value) in states.items():
if isinstance(value, (float, int)):
stat = ('%s.%s' % (state.entity_id, key.replace(' ', '_')))
statsd_client.gauge(stat, value, sample_rate)
elif isinstance(_state, (float, int)):
statsd_client.gauge(state.entity_id, _state, sample_rate)
statsd_client.incr(state.entity_id, rate=sample_rate)
hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
return True | -1,613,744,986,944,263,200 | Set up the StatsD component. | homeassistant/components/statsd/__init__.py | setup | 0x00-0xFF/home-assistant | python | def setup(hass, config):
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
sample_rate = conf.get(CONF_RATE)
prefix = conf.get(CONF_PREFIX)
value_mapping = conf.get(CONF_VALUE_MAP)
show_attribute_flag = conf.get(CONF_ATTR)
statsd_client = statsd.StatsClient(host=host, port=port, prefix=prefix)
def statsd_event_listener(event):
'Listen for new messages on the bus and send them to StatsD.'
state = event.data.get('new_state')
if (state is None):
return
try:
if (value_mapping and (state.state in value_mapping)):
_state = float(value_mapping[state.state])
else:
_state = state_helper.state_as_number(state)
except ValueError:
_state = None
states = dict(state.attributes)
_LOGGER.debug('Sending %s', state.entity_id)
if (show_attribute_flag is True):
if isinstance(_state, (float, int)):
statsd_client.gauge(('%s.state' % state.entity_id), _state, sample_rate)
for (key, value) in states.items():
if isinstance(value, (float, int)):
stat = ('%s.%s' % (state.entity_id, key.replace(' ', '_')))
statsd_client.gauge(stat, value, sample_rate)
elif isinstance(_state, (float, int)):
statsd_client.gauge(state.entity_id, _state, sample_rate)
statsd_client.incr(state.entity_id, rate=sample_rate)
hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
return True |
def statsd_event_listener(event):
'Listen for new messages on the bus and send them to StatsD.'
state = event.data.get('new_state')
if (state is None):
return
try:
if (value_mapping and (state.state in value_mapping)):
_state = float(value_mapping[state.state])
else:
_state = state_helper.state_as_number(state)
except ValueError:
_state = None
states = dict(state.attributes)
_LOGGER.debug('Sending %s', state.entity_id)
if (show_attribute_flag is True):
if isinstance(_state, (float, int)):
statsd_client.gauge(('%s.state' % state.entity_id), _state, sample_rate)
for (key, value) in states.items():
if isinstance(value, (float, int)):
stat = ('%s.%s' % (state.entity_id, key.replace(' ', '_')))
statsd_client.gauge(stat, value, sample_rate)
elif isinstance(_state, (float, int)):
statsd_client.gauge(state.entity_id, _state, sample_rate)
statsd_client.incr(state.entity_id, rate=sample_rate) | -1,950,298,841,944,332,300 | Listen for new messages on the bus and send them to StatsD. | homeassistant/components/statsd/__init__.py | statsd_event_listener | 0x00-0xFF/home-assistant | python | def statsd_event_listener(event):
state = event.data.get('new_state')
if (state is None):
return
try:
if (value_mapping and (state.state in value_mapping)):
_state = float(value_mapping[state.state])
else:
_state = state_helper.state_as_number(state)
except ValueError:
_state = None
states = dict(state.attributes)
_LOGGER.debug('Sending %s', state.entity_id)
if (show_attribute_flag is True):
if isinstance(_state, (float, int)):
statsd_client.gauge(('%s.state' % state.entity_id), _state, sample_rate)
for (key, value) in states.items():
if isinstance(value, (float, int)):
stat = ('%s.%s' % (state.entity_id, key.replace(' ', '_')))
statsd_client.gauge(stat, value, sample_rate)
elif isinstance(_state, (float, int)):
statsd_client.gauge(state.entity_id, _state, sample_rate)
statsd_client.incr(state.entity_id, rate=sample_rate) |
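For reference, the two statsd client calls the listener relies on come from the `statsd` PyPI package; a minimal sketch (host, port, prefix, and stat names are illustrative):

```python
import statsd

client = statsd.StatsClient(host="localhost", port=8125, prefix="hass")
client.gauge("sensor.temperature.state", 21.5, rate=1)  # set a gauge, with sample rate
client.incr("sensor.temperature", rate=1)                # bump a counter
```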
def _timestamp(self):
'Returns the local time in milliseconds since the unix epoch'
return int((time.time() * 1000)) | 6,703,314,015,782,014,000 | Returns the local time in milliseconds since the unix epoch | ably/rest/auth.py | _timestamp | ably/ably-python | python | def _timestamp(self):
return int((time.time() * 1000)) |
def get_gate_url(self):
'\n Override this method to override the gate_url attribute.\n '
gate_url = (self.gate_url or settings.GATE_URL)
if (not gate_url):
raise ImproperlyConfigured('{0} is missing the gate_url attribute. Define {0}.gate_url, settings.GATE_URL, or override {0}.get_gate_url().'.format(self.__class__.__name__))
return str(gate_url) | -6,638,031,244,147,534,000 | Override this method to override the gate_url attribute. | gate/mixin.py | get_gate_url | n-serrette/wedding-website | python | def get_gate_url(self):
gate_url = (self.gate_url or settings.GATE_URL)
if (not gate_url):
raise ImproperlyConfigured('{0} is missing the gate_url attribute. Define {0}.gate_url, settings.GATE_URL, or override {0}.get_gate_url().'.format(self.__class__.__name__))
return str(gate_url) |
def get_permission_denied_message(self):
'\n Override this method to override the permission_denied_message attribute.\n '
return self.permission_denied_message | 5,832,800,233,362,823,000 | Override this method to override the permission_denied_message attribute. | gate/mixin.py | get_permission_denied_message | n-serrette/wedding-website | python | def get_permission_denied_message(self):
return self.permission_denied_message |
def get_redirect_field_name(self):
'\n Override this method to override the redirect_field_name attribute.\n '
return self.redirect_field_name | 3,652,313,246,061,635,600 | Override this method to override the redirect_field_name attribute. | gate/mixin.py | get_redirect_field_name | n-serrette/wedding-website | python | def get_redirect_field_name(self):
return self.redirect_field_name |
def get_lock_test_func(self):
'\n Override this method to use a different test_func method.\n '
return self.lock_test_func | 6,096,030,275,831,517,000 | Override this method to use a different test_func method. | gate/mixin.py | get_lock_test_func | n-serrette/wedding-website | python | def get_lock_test_func(self):
return self.lock_test_func |
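These four hooks mirror Django's AccessMixin pattern; a hypothetical dispatch() that consumes them might look like this (redirect_to_login and PermissionDenied are Django's real APIs, the mixin body itself is a sketch):

```python
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied

class LockMixinSketch:
    raise_exception = False  # assumed attribute, as in Django's AccessMixin

    def dispatch(self, request, *args, **kwargs):
        # Gate the view: failing the lock test either raises or redirects.
        if not self.get_lock_test_func()():
            if self.raise_exception:
                raise PermissionDenied(self.get_permission_denied_message())
            return redirect_to_login(
                request.get_full_path(),
                self.get_gate_url(),
                self.get_redirect_field_name(),
            )
        return super().dispatch(request, *args, **kwargs)
```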
def __init__(self, opt):
'Initialize this dataset class.\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n '
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, (opt.phase + 'A'))
self.dir_B = os.path.join(opt.dataroot_B, (opt.phase + 'B'))
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
btoA = (self.opt.direction == 'BtoA')
input_nc = (self.opt.output_nc if btoA else self.opt.input_nc)
output_nc = (self.opt.input_nc if btoA else self.opt.output_nc)
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1)) | 7,680,058,650,721,001,000 | Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions | data/unaligned_dataset.py | __init__ | sinhaharsh/pytorch-CycleGAN-and-pix2pix | python | def __init__(self, opt):
'Initialize this dataset class.\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n '
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, (opt.phase + 'A'))
self.dir_B = os.path.join(opt.dataroot_B, (opt.phase + 'B'))
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
btoA = (self.opt.direction == 'BtoA')
input_nc = (self.opt.output_nc if btoA else self.opt.input_nc)
output_nc = (self.opt.input_nc if btoA else self.opt.output_nc)
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1)) |
def __getitem__(self, index):
'Return a data point and its metadata information.\n\n Parameters:\n index (int) -- a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) -- an image in the input domain\n B (tensor) -- its corresponding image in the target domain\n A_paths (str) -- image paths\n B_paths (str) -- image paths\n '
A_path = self.A_paths[(index % self.A_size)]
if self.opt.serial_batches:
index_B = (index % self.B_size)
else:
index_B = random.randint(0, (self.B_size - 1))
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
try:
B_img = self.hsi_loader(B_path)
except KeyError:
print(B_path)
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} | -7,054,115,083,042,761,000 | Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths | data/unaligned_dataset.py | __getitem__ | sinhaharsh/pytorch-CycleGAN-and-pix2pix | python | def __getitem__(self, index):
'Return a data point and its metadata information.\n\n Parameters:\n index (int) -- a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) -- an image in the input domain\n B (tensor) -- its corresponding image in the target domain\n A_paths (str) -- image paths\n B_paths (str) -- image paths\n '
A_path = self.A_paths[(index % self.A_size)]
if self.opt.serial_batches:
index_B = (index % self.B_size)
else:
index_B = random.randint(0, (self.B_size - 1))
B_path = self.B_paths[index_B]
A_img = np.array(Image.open(A_path).convert('RGB'))
A_img = self.stack(A_img)
try:
B_img = self.hsi_loader(B_path)
except KeyError:
print(B_path)
B = normalize(B_img, max_=4096)
A = normalize(A_img, max_=1)
A = adaptive_instance_normalization(A, B)
del A_img, B_img
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} |
def __len__(self):
'Return the total number of images in the dataset.\n\n As we have two datasets with potentially different numbers of images,\n we take the maximum of the two.\n '
return max(self.A_size, self.B_size) | 91,132,351,507,256,300 | Return the total number of images in the dataset.
As we have two datasets with potentially different numbers of images,
we take the maximum of the two. | data/unaligned_dataset.py | __len__ | sinhaharsh/pytorch-CycleGAN-and-pix2pix | python | def __len__(self):
'Return the total number of images in the dataset.\n\n As we have two datasets with potentially different numbers of images,\n we take the maximum of the two.\n '
return max(self.A_size, self.B_size) |
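The unaligned pairing in __getitem__ reduces to indexing A cyclically and B either serially or at random; a stripped-down sketch of that selection logic:

```python
import random

def pair_indices(index, a_size, b_size, serial_batches=False):
    """Mirror UnalignedDataset's index selection (sketch)."""
    index_a = index % a_size                     # walk domain A in order
    if serial_batches:
        index_b = index % b_size                 # fixed A-B pairing
    else:
        index_b = random.randint(0, b_size - 1)  # randomize to avoid fixed pairs
    return index_a, index_b
```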
def insert_pt(self, new_pt):
'returns index of new point'
i = bisect(self.points, (new_pt[0], None))
self.points.insert(i, new_pt)
dispatcher.send('points changed', sender=self)
return i | 634,380,642,633,548,900 | returns index of new point | light9/curvecalc/curve.py | insert_pt | drewp/light9 | python | def insert_pt(self, new_pt):
i = bisect(self.points, (new_pt[0], None))
self.points.insert(i, new_pt)
dispatcher.send('points changed', sender=self)
return i |
def points_between(self, x1, x2):
'returns (x,y) points'
return [self.points[i] for i in self.indices_between(x1, x2)] | 169,765,610,131,725,920 | returns (x,y) points | light9/curvecalc/curve.py | points_between | drewp/light9 | python | def points_between(self, x1, x2):
return [self.points[i] for i in self.indices_between(x1, x2)] |
def point_before(self, x):
'(x,y) of the point left of x, or None'
leftidx = self.index_before(x)
if (leftidx is None):
return None
return self.points[leftidx] | -8,937,247,978,327,115,000 | (x,y) of the point left of x, or None | light9/curvecalc/curve.py | point_before | drewp/light9 | python | def point_before(self, x):
leftidx = self.index_before(x)
if (leftidx is None):
return None
return self.points[leftidx] |
def newCurve(self, ctx, label):
'\n Save type/label for a new :Curve resource.\n Pass the ctx where the main curve data (not the points) will go.\n '
if hasattr(self, 'curve'):
raise ValueError(('CurveResource already has a curve %r' % self.curve))
self.graph.patch(Patch(addQuads=[(self.uri, RDF.type, L9['Curve'], ctx), (self.uri, RDFS.label, label, ctx)]))
self.curve = Curve(self.uri)
self.curve.points.extend([(0, 0)])
self.saveCurve()
self.watchCurvePointChanges() | -7,079,151,998,791,664,000 | Save type/label for a new :Curve resource.
Pass the ctx where the main curve data (not the points) will go. | light9/curvecalc/curve.py | newCurve | drewp/light9 | python | def newCurve(self, ctx, label):
'\n Save type/label for a new :Curve resource.\n Pass the ctx where the main curve data (not the points) will go.\n '
if hasattr(self, 'curve'):
raise ValueError(('CurveResource already has a curve %r' % self.curve))
self.graph.patch(Patch(addQuads=[(self.uri, RDF.type, L9['Curve'], ctx), (self.uri, RDFS.label, label, ctx)]))
self.curve = Curve(self.uri)
self.curve.points.extend([(0, 0)])
self.saveCurve()
self.watchCurvePointChanges() |
def watchCurvePointChanges(self):
'start watching and saving changes to the graph'
dispatcher.connect(self.onChange, 'points changed', sender=self.curve) | 805,307,617,194,940,800 | start watching and saving changes to the graph | light9/curvecalc/curve.py | watchCurvePointChanges | drewp/light9 | python | def watchCurvePointChanges(self):
dispatcher.connect(self.onChange, 'points changed', sender=self.curve) |
def loadCurvesForSong(self):
"\n current curves will track song's curves.\n \n This fires 'add_curve' dispatcher events to announce the new curves.\n "
log.info('loadCurvesForSong')
dispatcher.send('clear_curves')
self.curveResources.clear()
self.markers = Markers(uri=None, pointsStorage='file')
self.currentSong = self.graph.value(self.session, L9['currentSong'])
if (self.currentSong is None):
return
for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
try:
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.loadCurve()
curvename = self.graph.label(uri)
if (not curvename):
raise ValueError(('curve %r has no label' % uri))
dispatcher.send('add_curve', sender=self, uri=uri, label=curvename, curve=cr.curve)
except Exception as e:
log.error('loading %s failed: %s', uri, e)
basename = os.path.join(showconfig.curvesDir(), showconfig.songFilenameFromURI(self.currentSong))
try:
self.markers.load(('%s.markers' % basename))
except IOError:
print('no marker file found') | -2,281,801,921,494,547,500 | current curves will track song's curves.
This fires 'add_curve' dispatcher events to announce the new curves. | light9/curvecalc/curve.py | loadCurvesForSong | drewp/light9 | python | def loadCurvesForSong(self):
"\n current curves will track song's curves.\n \n This fires 'add_curve' dispatcher events to announce the new curves.\n "
log.info('loadCurvesForSong')
dispatcher.send('clear_curves')
self.curveResources.clear()
self.markers = Markers(uri=None, pointsStorage='file')
self.currentSong = self.graph.value(self.session, L9['currentSong'])
if (self.currentSong is None):
return
for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
try:
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.loadCurve()
curvename = self.graph.label(uri)
if (not curvename):
raise ValueError(('curve %r has no label' % uri))
dispatcher.send('add_curve', sender=self, uri=uri, label=curvename, curve=cr.curve)
except Exception as e:
log.error('loading %s failed: %s', uri, e)
basename = os.path.join(showconfig.curvesDir(), showconfig.songFilenameFromURI(self.currentSong))
try:
self.markers.load(('%s.markers' % basename))
except IOError:
print('no marker file found') |
def save(self):
'writes a file for each curve with a name\n like basename-curvename, or saves them to the rdf graph'
basename = os.path.join(showconfig.curvesDir(), showconfig.songFilenameFromURI(self.currentSong))
patches = []
for cr in list(self.curveResources.values()):
patches.extend(cr.getSavePatches())
self.markers.save(('%s.markers' % basename))
for p in patches:
self.graph.patch(p) | 2,741,524,990,762,291,000 | writes a file for each curve with a name
like basename-curvename, or saves them to the rdf graph | light9/curvecalc/curve.py | save | drewp/light9 | python | def save(self):
'writes a file for each curve with a name\n like basename-curvename, or saves them to the rdf graph'
basename = os.path.join(showconfig.curvesDir(), showconfig.songFilenameFromURI(self.currentSong))
patches = []
for cr in list(self.curveResources.values()):
patches.extend(cr.getSavePatches())
self.markers.save(('%s.markers' % basename))
for p in patches:
self.graph.patch(p) |
def __init__(__self__, *, key: pulumi.Input[str], resource_arn: pulumi.Input[str], value: pulumi.Input[str]):
'\n The set of arguments for constructing a Tag resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'resource_arn', resource_arn)
pulumi.set(__self__, 'value', value) | 3,258,824,647,970,220,000 | The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value. | sdk/python/pulumi_aws/ecs/tag.py | __init__ | pulumi/pulumi-aws | python | def __init__(__self__, *, key: pulumi.Input[str], resource_arn: pulumi.Input[str], value: pulumi.Input[str]):
'\n The set of arguments for constructing a Tag resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'resource_arn', resource_arn)
pulumi.set(__self__, 'value', value) |
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
'\n Tag name.\n '
return pulumi.get(self, 'key') | -8,963,650,479,449,913,000 | Tag name. | sdk/python/pulumi_aws/ecs/tag.py | key | pulumi/pulumi-aws | python | @property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, 'key') |
@property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> pulumi.Input[str]:
'\n Amazon Resource Name (ARN) of the ECS resource to tag.\n '
return pulumi.get(self, 'resource_arn') | -537,349,869,984,642,240 | Amazon Resource Name (ARN) of the ECS resource to tag. | sdk/python/pulumi_aws/ecs/tag.py | resource_arn | pulumi/pulumi-aws | python | @property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, 'resource_arn') |
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
'\n Tag value.\n '
return pulumi.get(self, 'value') | -8,049,809,238,565,177,000 | Tag value. | sdk/python/pulumi_aws/ecs/tag.py | value | pulumi/pulumi-aws | python | @property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, 'value') |
def __init__(__self__, *, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None):
'\n Input properties used for looking up and filtering Tag resources.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
if (key is not None):
pulumi.set(__self__, 'key', key)
if (resource_arn is not None):
pulumi.set(__self__, 'resource_arn', resource_arn)
if (value is not None):
pulumi.set(__self__, 'value', value) | -4,799,673,854,584,692,000 | Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value. | sdk/python/pulumi_aws/ecs/tag.py | __init__ | pulumi/pulumi-aws | python | def __init__(__self__, *, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None):
'\n Input properties used for looking up and filtering Tag resources.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
if (key is not None):
pulumi.set(__self__, 'key', key)
if (resource_arn is not None):
pulumi.set(__self__, 'resource_arn', resource_arn)
if (value is not None):
pulumi.set(__self__, 'value', value) |
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
'\n Tag name.\n '
return pulumi.get(self, 'key') | 6,652,848,325,136,089,000 | Tag name. | sdk/python/pulumi_aws/ecs/tag.py | key | pulumi/pulumi-aws | python | @property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, 'key') |
@property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> Optional[pulumi.Input[str]]:
'\n Amazon Resource Name (ARN) of the ECS resource to tag.\n '
return pulumi.get(self, 'resource_arn') | -4,876,156,079,371,697,000 | Amazon Resource Name (ARN) of the ECS resource to tag. | sdk/python/pulumi_aws/ecs/tag.py | resource_arn | pulumi/pulumi-aws | python | @property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, 'resource_arn') |
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
'\n Tag value.\n '
return pulumi.get(self, 'value') | -1,863,364,334,913,555,700 | Tag value. | sdk/python/pulumi_aws/ecs/tag.py | value | pulumi/pulumi-aws | python | @property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, 'value') |
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None, __props__=None):
'\n ## Import\n\n `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.\n\n ```sh\n $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
... | -4,185,412,963,249,030,700 | ## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value. | sdk/python/pulumi_aws/ecs/tag.py | __init__ | pulumi/pulumi-aws | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None, __props__=None):
'\n ## Import\n\n `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.\n\n ```sh\n $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n '
... |
@overload
def __init__(__self__, resource_name: str, args: TagArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n ## Import\n\n `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.\n\n ```sh\n $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name\n ```\n\n :param str resource_name: The name of the resource.\n :param TagArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... | 6,637,949,711,491,176,000 | ## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_aws/ecs/tag.py | __init__ | pulumi/pulumi-aws | python | @overload
def __init__(__self__, resource_name: str, args: TagArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n ## Import\n\n `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.\n\n ```sh\n $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name\n ```\n\n :param str resource_name: The name of the resource.\n :param TagArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... |
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None) -> 'Tag':
"\n Get an existing Tag resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__['key'] = key
__props__.__dict__['resource_arn'] = resource_arn
__props__.__dict__['value'] = value
return Tag(resource_name, opts=opts, __props__=__props__) | -5,005,814,816,939,013,000 | Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value. | sdk/python/pulumi_aws/ecs/tag.py | get | pulumi/pulumi-aws | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, key: Optional[pulumi.Input[str]]=None, resource_arn: Optional[pulumi.Input[str]]=None, value: Optional[pulumi.Input[str]]=None) -> 'Tag':
"\n Get an existing Tag resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] key: Tag name.\n :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.\n :param pulumi.Input[str] value: Tag value.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TagState.__new__(_TagState)
__props__.__dict__['key'] = key
__props__.__dict__['resource_arn'] = resource_arn
__props__.__dict__['value'] = value
return Tag(resource_name, opts=opts, __props__=__props__) |
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
'\n Tag name.\n '
return pulumi.get(self, 'key') | -3,099,507,193,933,223,400 | Tag name. | sdk/python/pulumi_aws/ecs/tag.py | key | pulumi/pulumi-aws | python | @property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
return pulumi.get(self, 'key') |
@property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> pulumi.Output[str]:
'\n Amazon Resource Name (ARN) of the ECS resource to tag.\n '
return pulumi.get(self, 'resource_arn') | 7,090,505,760,660,997,000 | Amazon Resource Name (ARN) of the ECS resource to tag. | sdk/python/pulumi_aws/ecs/tag.py | resource_arn | pulumi/pulumi-aws | python | @property
@pulumi.getter(name='resourceArn')
def resource_arn(self) -> pulumi.Output[str]:
return pulumi.get(self, 'resource_arn') |
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
'\n Tag value.\n '
return pulumi.get(self, 'value') | 4,303,320,646,784,983,000 | Tag value. | sdk/python/pulumi_aws/ecs/tag.py | value | pulumi/pulumi-aws | python | @property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
return pulumi.get(self, 'value') |
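A minimal usage sketch of the resource defined above (the ARN is a placeholder):

```python
import pulumi_aws as aws

# Tag an existing ECS cluster; key, resource_arn, and value are all
# required, as the TagArgs constructor above shows.
example = aws.ecs.Tag(
    "example",
    resource_arn="arn:aws:ecs:us-east-1:123456789012:cluster/example",
    key="Name",
    value="example",
)
```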
def GetExperimentArgs():
"Returns a list of arguments with all tested field trials.\n\n This function is a simple wrapper around the variation team's fieldtrail_util\n script that generates command line arguments to test Chromium field trials.\n\n Returns:\n an array of command line arguments to pass to chrome\n "
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif (platform.system().lower() == 'linux'):
my_platform = 'linux'
elif (platform.system().lower() == 'windows'):
my_platform = 'windows'
elif (platform.system().lower() == 'darwin'):
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform) | -8,818,158,228,978,697,000 | Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variation team's fieldtrail_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome | tools/chrome_proxy/webdriver/variations_combinations.py | GetExperimentArgs | jnpatel2811/chromium | python | def GetExperimentArgs():
"Returns a list of arguments with all tested field trials.\n\n This function is a simple wrapper around the variation team's fieldtrail_util\n script that generates command line arguments to test Chromium field trials.\n\n Returns:\n an array of command line arguments to pass to chrome\n "
config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
my_platform = ''
if common.ParseFlags().android:
my_platform = 'android'
elif (platform.system().lower() == 'linux'):
my_platform = 'linux'
elif (platform.system().lower() == 'windows'):
my_platform = 'windows'
elif (platform.system().lower() == 'darwin'):
my_platform = 'mac'
else:
raise Exception('unknown platform!')
return fieldtrial_util.GenerateArgs(config_path, my_platform) |
def GenerateTestSuites():
'A generator function that yields non-blacklisted tests to run.\n\n This function yields test suites each with a single test case whose id is not\n blacklisted in the array at the top of this file.\n\n Yields:\n non-blacklisted test suites to run\n '
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if (test_method.id() not in test_blacklist):
ts = unittest.TestSuite()
ts.addTest(test_method)
(yield (ts, test_method.id())) | 2,000,100,436,453,714,700 | A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run | tools/chrome_proxy/webdriver/variations_combinations.py | GenerateTestSuites | jnpatel2811/chromium | python | def GenerateTestSuites():
'A generator function that yields non-blacklisted tests to run.\n\n This function yields test suites each with a single test case whose id is not\n blacklisted in the array at the top of this file.\n\n Yields:\n non-blacklisted test suites to run\n '
loader = unittest.TestLoader()
for test_suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
for test_case in test_suite:
for test_method in test_case:
if (test_method.id() not in test_blacklist):
ts = unittest.TestSuite()
ts.addTest(test_method)
(yield (ts, test_method.id())) |
def ParseFlagsWithExtraBrowserArgs(extra_args):
'Generates a function to override common.ParseFlags.\n\n The returned function will honor everything in the original ParseFlags(), but\n adds on additional browser_args.\n\n Args:\n extra_args: The extra browser arguments to add.\n Returns:\n A function to override common.ParseFlags with additional browser_args.\n '
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = (((original_flags.browser_args if original_flags.browser_args else '') + ' ') + extra_args)
return original_flags
return AddExtraBrowserArgs | -5,581,824,193,676,476,000 | Generates a function to override common.ParseFlags.
The returned function will honor everything in the original ParseFlags(), but
adds on additional browser_args.
Args:
extra_args: The extra browser arguments to add.
Returns:
A function to override common.ParseFlags with additional browser_args. | tools/chrome_proxy/webdriver/variations_combinations.py | ParseFlagsWithExtraBrowserArgs | jnpatel2811/chromium | python | def ParseFlagsWithExtraBrowserArgs(extra_args):
'Generates a function to override common.ParseFlags.\n\n The returned function will honor everything in the original ParseFlags(), but\n adds on additional browser_args.\n\n Args:\n extra_args: The extra browser arguments to add.\n Returns:\n A function to override common.ParseFlags with additional browser_args.\n '
original_flags = common.ParseFlags()
def AddExtraBrowserArgs():
original_flags.browser_args = (((original_flags.browser_args if original_flags.browser_args else '') + ' ') + extra_args)
return original_flags
return AddExtraBrowserArgs |
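The record above illustrates a closure-based override: ParseFlagsWithExtraBrowserArgs captures the already-parsed flags object and returns a replacement for common.ParseFlags that appends extra browser arguments. A minimal self-contained sketch of the same pattern, assuming a SimpleNamespace stand-in for the real flags object (parse_flags and the flag name below are invented for illustration):

import types

def parse_flags():
    # Stand-in for common.ParseFlags(): a plain namespace of parsed flags.
    return types.SimpleNamespace(browser_args=None)

def parse_flags_with_extra_browser_args(extra_args):
    original_flags = parse_flags()
    def add_extra_browser_args():
        # Tolerate an unset browser_args; note that every call re-appends
        # extra_args to the same captured flags object.
        base = original_flags.browser_args if original_flags.browser_args else ''
        original_flags.browser_args = base + ' ' + extra_args
        return original_flags
    return add_extra_browser_args

parse_flags = parse_flags_with_extra_browser_args('--enable-features=Foo')
print(parse_flags().browser_args)  # " --enable-features=Foo"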
def main():
"Runs all non-blacklisted tests against Chromium field trials.\n\n This script runs all chrome proxy integration tests that haven't been\n blacklisted against the field trial testing configuration used by Chromium\n perf bots.\n "
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
for (test_suite, test_id) in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write(('%s... ' % test_id))
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2, buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print(("%s %s %s --test_filter=%s --browser_args='%s'" % (sys.executable, os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args)))
if flags.failfast:
return | -8,306,183,805,846,712,000 | Runs all non-blacklisted tests against Chromium field trials.
This script runs all chrome proxy integration tests that haven't been
blacklisted against the field trial testing configuration used by Chromium
perf bots. | tools/chrome_proxy/webdriver/variations_combinations.py | main | jnpatel2811/chromium | python | def main():
"Runs all non-blacklisted tests against Chromium field trials.\n\n This script runs all chrome proxy integration tests that haven't been\n blacklisted against the field trial testing configuration used by Chromium\n perf bots.\n "
flags = common.ParseFlags()
experiment_args = ' '.join(GetExperimentArgs())
common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
for (test_suite, test_id) in GenerateTestSuites():
buf = io.BytesIO()
sys.stdout.write(('%s... ' % test_id))
sys.stdout.flush()
testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2, buffer=(not flags.disable_buffer))
result = testRunner.run(test_suite)
if result.wasSuccessful():
print('ok')
else:
print('failed')
print(buf.getvalue())
print('To repeat this test, run: ')
print(("%s %s %s --test_filter=%s --browser_args='%s'" % (sys.executable, os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args)))
if flags.failfast:
return |
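main() demonstrates a per-test runner pattern: each single-test suite gets its own TextTestRunner writing into an in-memory buffer, and the buffer is printed only when the test fails. A Python 3 sketch of the same idea (io.StringIO replaces the io.BytesIO this Python 2-era script uses, and the Sample test case is invented):

import io
import sys
import unittest

class Sample(unittest.TestCase):
    def test_ok(self):
        self.assertEqual(1 + 1, 2)

    def test_bad(self):
        self.assertEqual(1 + 1, 3)

loader = unittest.TestLoader()
for test in loader.loadTestsFromTestCase(Sample):
    buf = io.StringIO()                    # per-test capture of runner output
    sys.stdout.write('%s... ' % test.id())
    sys.stdout.flush()
    runner = unittest.TextTestRunner(stream=buf, verbosity=2)
    result = runner.run(unittest.TestSuite([test]))
    if result.wasSuccessful():
        print('ok')
    else:
        print('failed')
        print(buf.getvalue())              # dump the details only on failure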
def plot_gate_map(backend, figsize=None, plot_directed=False, label_qubits=True, qubit_size=24, line_width=4, font_size=12, qubit_color=None, qubit_labels=None, line_color=None, font_color='w', ax=None):
"Plots the gate map of a device.\n\n Args:\n backend (BaseBackend): A backend instance,\n figsize (tuple): Output figure size (wxh) in inches.\n plot_directed (bool): Plot directed coupling map.\n label_qubits (bool): Label the qubits.\n qubit_size (float): Size of qubit marker.\n line_width (float): Width of lines.\n font_size (int): Font size of qubit labels.\n qubit_color (list): A list of colors for the qubits\n qubit_labels (list): A list of qubit labels\n line_color (list): A list of colors for each line from coupling_map.\n font_color (str): The font color for the qubit labels.\n ax (Axes): A Matplotlib axes instance.\n\n Returns:\n Figure: A Matplotlib figure instance.\n\n Raises:\n QiskitError: if tried to pass a simulator.\n ImportError: if matplotlib not installed.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_gate_map\n %matplotlib inline\n\n provider = IBMQ.load_account()\n accountProvider = IBMQ.get_provider(hub='ibm-q')\n backend = accountProvider.get_backend('ibmq_vigo')\n plot_gate_map(backend)\n "
if (not HAS_MATPLOTLIB):
raise ImportError('Must have Matplotlib installed. To install, run "pip install matplotlib".')
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if backend.configuration().simulator:
raise QiskitError('Requires a device backend, not simulator.')
input_axes = False
if ax:
input_axes = True
mpl_data = {}
mpl_data[1] = [[0, 0]]
mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
mpl_data[7] = [[0, 0], [0, 1], [0, 2], [1, 1], [2, 0], [2, 1], [2, 2]]
mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]
mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 7], [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]
mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [1, 7], [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]
mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2], [3, 2], [0, 3], [1, 3], [3, 3], [4, 3], [1, 4], [3, 4], [1, 5], [2, 5], [3, 5], [1, 6], [3, 6], [0, 7], [1, 7], [3, 7], [4, 7], [1, 8], [3, 8], [1, 9], [2, 9], [3, 9], [3, 10]]
mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 2], [1, 6], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [3, 0], [3, 4], [3, 8], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8]]
mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 2], [1, 6], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [3, 0], [3, 4], [3, 8], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8], [5, 2], [5, 6], [6, 0], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [6, 7], [6, 8], [7, 0], [7, 4], [7, 8], [8, 0], [8, 1], [8, 2], [8, 3], [8, 4], [8, 5], [8, 6], [8, 7], [8, 8], [9, 2], [9, 6]]
mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [0, 8], [0, 9], [1, 0], [1, 4], [1, 8], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10], [3, 2], [3, 6], [3, 10], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10], [5, 0], [5, 4], [5, 8], [6, 0], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10], [7, 2], [7, 6], [7, 10], [8, 1], [8, 2], [8, 3], [8, 4], [8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]
config = backend.configuration()
num_qubits = config.n_qubits
cmap = config.coupling_map
if (qubit_labels is None):
qubit_labels = list(range(num_qubits))
elif (len(qubit_labels) != num_qubits):
raise QiskitError('Length of qubit labels does not equal number of qubits.')
if (num_qubits in mpl_data.keys()):
grid_data = mpl_data[num_qubits]
elif (not input_axes):
(fig, ax) = plt.subplots(figsize=(5, 5))
ax.axis('off')
return fig
x_max = max([d[1] for d in grid_data])
y_max = max([d[0] for d in grid_data])
max_dim = max(x_max, y_max)
if (figsize is None):
if ((num_qubits == 1) or (((x_max / max_dim) > 0.33) and ((y_max / max_dim) > 0.33))):
figsize = (5, 5)
else:
figsize = (9, 3)
if (ax is None):
(fig, ax) = plt.subplots(figsize=figsize)
ax.axis('off')
if (qubit_color is None):
qubit_color = (['#648fff'] * config.n_qubits)
if (line_color is None):
line_color = ((['#648fff'] * len(cmap)) if cmap else [])
if (num_qubits != 1):
for (ind, edge) in enumerate(cmap):
is_symmetric = False
if (edge[::(- 1)] in cmap):
is_symmetric = True
y_start = grid_data[edge[0]][0]
x_start = grid_data[edge[0]][1]
y_end = grid_data[edge[1]][0]
x_end = grid_data[edge[1]][1]
if is_symmetric:
if (y_start == y_end):
x_end = (((x_end - x_start) / 2) + x_start)
elif (x_start == x_end):
y_end = (((y_end - y_start) / 2) + y_start)
else:
x_end = (((x_end - x_start) / 2) + x_start)
y_end = (((y_end - y_start) / 2) + y_start)
ax.add_artist(plt.Line2D([x_start, x_end], [(- y_start), (- y_end)], color=line_color[ind], linewidth=line_width, zorder=0))
if plot_directed:
dx = (x_end - x_start)
dy = (y_end - y_start)
if is_symmetric:
x_arrow = (x_start + (dx * 0.95))
y_arrow = ((- y_start) - (dy * 0.95))
dx_arrow = (dx * 0.01)
dy_arrow = ((- dy) * 0.01)
head_width = 0.15
else:
x_arrow = (x_start + (dx * 0.5))
y_arrow = ((- y_start) - (dy * 0.5))
dx_arrow = (dx * 0.2)
dy_arrow = ((- dy) * 0.2)
head_width = 0.2
ax.add_patch(mpatches.FancyArrow(x_arrow, y_arrow, dx_arrow, dy_arrow, head_width=head_width, length_includes_head=True, edgecolor=None, linewidth=0, facecolor=line_color[ind], zorder=1))
for (var, idx) in enumerate(grid_data):
_idx = [idx[1], (- idx[0])]
width = _GraphDist(qubit_size, ax, True)
height = _GraphDist(qubit_size, ax, False)
ax.add_artist(mpatches.Ellipse(_idx, width, height, color=qubit_color[var], zorder=1))
if label_qubits:
ax.text(*_idx, s=qubit_labels[var], horizontalalignment='center', verticalalignment='center', color=font_color, size=font_size, weight='bold')
ax.set_xlim([(- 1), (x_max + 1)])
ax.set_ylim([(- (y_max + 1)), 1])
if (not input_axes):
if (get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']):
plt.close(fig)
return fig
return None | 8,764,939,721,214,748,000 | Plots the gate map of a device.
Args:
backend (BaseBackend): A backend instance,
figsize (tuple): Output figure size (wxh) in inches.
plot_directed (bool): Plot directed coupling map.
label_qubits (bool): Label the qubits.
qubit_size (float): Size of qubit marker.
line_width (float): Width of lines.
font_size (int): Font size of qubit labels.
qubit_color (list): A list of colors for the qubits
qubit_labels (list): A list of qubit labels
line_color (list): A list of colors for each line from coupling_map.
font_color (str): The font color for the qubit labels.
ax (Axes): A Matplotlib axes instance.
Returns:
Figure: A Matplotlib figure instance.
Raises:
QiskitError: if a simulator is passed.
ImportError: if matplotlib not installed.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_gate_map
%matplotlib inline
provider = IBMQ.load_account()
accountProvider = IBMQ.get_provider(hub='ibm-q')
backend = accountProvider.get_backend('ibmq_vigo')
plot_gate_map(backend) | qiskit/visualization/gate_map.py | plot_gate_map | AzizNgoueya/qiskit-terra | python | def plot_gate_map(backend, figsize=None, plot_directed=False, label_qubits=True, qubit_size=24, line_width=4, font_size=12, qubit_color=None, qubit_labels=None, line_color=None, font_color='w', ax=None):
"Plots the gate map of a device.\n\n Args:\n backend (BaseBackend): A backend instance,\n figsize (tuple): Output figure size (wxh) in inches.\n plot_directed (bool): Plot directed coupling map.\n label_qubits (bool): Label the qubits.\n qubit_size (float): Size of qubit marker.\n line_width (float): Width of lines.\n font_size (int): Font size of qubit labels.\n qubit_color (list): A list of colors for the qubits\n qubit_labels (list): A list of qubit labels\n line_color (list): A list of colors for each line from coupling_map.\n font_color (str): The font color for the qubit labels.\n ax (Axes): A Matplotlib axes instance.\n\n Returns:\n Figure: A Matplotlib figure instance.\n\n Raises:\n QiskitError: if tried to pass a simulator.\n ImportError: if matplotlib not installed.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_gate_map\n %matplotlib inline\n\n provider = IBMQ.load_account()\n accountProvider = IBMQ.get_provider(hub='ibm-q')\n backend = accountProvider.get_backend('ibmq_vigo')\n plot_gate_map(backend)\n "
if (not HAS_MATPLOTLIB):
raise ImportError('Must have Matplotlib installed. To install, run "pip install matplotlib".')
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if backend.configuration().simulator:
raise QiskitError('Requires a device backend, not simulator.')
input_axes = False
if ax:
input_axes = True
mpl_data = {}
mpl_data[1] = [[0, 0]]
mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
mpl_data[7] = [[0, 0], [0, 1], [0, 2], [1, 1], [2, 0], [2, 1], [2, 2]]
mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]
mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 7], [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]
mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [1, 7], [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]
mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2], [3, 2], [0, 3], [1, 3], [3, 3], [4, 3], [1, 4], [3, 4], [1, 5], [2, 5], [3, 5], [1, 6], [3, 6], [0, 7], [1, 7], [3, 7], [4, 7], [1, 8], [3, 8], [1, 9], [2, 9], [3, 9], [3, 10]]
mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 2], [1, 6], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [3, 0], [3, 4], [3, 8], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8]]
mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [1, 2], [1, 6], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [3, 0], [3, 4], [3, 8], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8], [5, 2], [5, 6], [6, 0], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [6, 7], [6, 8], [7, 0], [7, 4], [7, 8], [8, 0], [8, 1], [8, 2], [8, 3], [8, 4], [8, 5], [8, 6], [8, 7], [8, 8], [9, 2], [9, 6]]
mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [0, 8], [0, 9], [1, 0], [1, 4], [1, 8], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10], [3, 2], [3, 6], [3, 10], [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10], [5, 0], [5, 4], [5, 8], [6, 0], [6, 1], [6, 2], [6, 3], [6, 4], [6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10], [7, 2], [7, 6], [7, 10], [8, 1], [8, 2], [8, 3], [8, 4], [8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]
config = backend.configuration()
num_qubits = config.n_qubits
cmap = config.coupling_map
if (qubit_labels is None):
qubit_labels = list(range(num_qubits))
elif (len(qubit_labels) != num_qubits):
raise QiskitError('Length of qubit labels does not equal number of qubits.')
if (num_qubits in mpl_data.keys()):
grid_data = mpl_data[num_qubits]
elif (not input_axes):
(fig, ax) = plt.subplots(figsize=(5, 5))
ax.axis('off')
return fig
x_max = max([d[1] for d in grid_data])
y_max = max([d[0] for d in grid_data])
max_dim = max(x_max, y_max)
if (figsize is None):
if ((num_qubits == 1) or (((x_max / max_dim) > 0.33) and ((y_max / max_dim) > 0.33))):
figsize = (5, 5)
else:
figsize = (9, 3)
if (ax is None):
(fig, ax) = plt.subplots(figsize=figsize)
ax.axis('off')
if (qubit_color is None):
qubit_color = (['#648fff'] * config.n_qubits)
if (line_color is None):
line_color = ((['#648fff'] * len(cmap)) if cmap else [])
if (num_qubits != 1):
for (ind, edge) in enumerate(cmap):
is_symmetric = False
if (edge[::(- 1)] in cmap):
is_symmetric = True
y_start = grid_data[edge[0]][0]
x_start = grid_data[edge[0]][1]
y_end = grid_data[edge[1]][0]
x_end = grid_data[edge[1]][1]
if is_symmetric:
if (y_start == y_end):
x_end = (((x_end - x_start) / 2) + x_start)
elif (x_start == x_end):
y_end = (((y_end - y_start) / 2) + y_start)
else:
x_end = (((x_end - x_start) / 2) + x_start)
y_end = (((y_end - y_start) / 2) + y_start)
ax.add_artist(plt.Line2D([x_start, x_end], [(- y_start), (- y_end)], color=line_color[ind], linewidth=line_width, zorder=0))
if plot_directed:
dx = (x_end - x_start)
dy = (y_end - y_start)
if is_symmetric:
x_arrow = (x_start + (dx * 0.95))
y_arrow = ((- y_start) - (dy * 0.95))
dx_arrow = (dx * 0.01)
dy_arrow = ((- dy) * 0.01)
head_width = 0.15
else:
x_arrow = (x_start + (dx * 0.5))
y_arrow = ((- y_start) - (dy * 0.5))
dx_arrow = (dx * 0.2)
dy_arrow = ((- dy) * 0.2)
head_width = 0.2
ax.add_patch(mpatches.FancyArrow(x_arrow, y_arrow, dx_arrow, dy_arrow, head_width=head_width, length_includes_head=True, edgecolor=None, linewidth=0, facecolor=line_color[ind], zorder=1))
for (var, idx) in enumerate(grid_data):
_idx = [idx[1], (- idx[0])]
width = _GraphDist(qubit_size, ax, True)
height = _GraphDist(qubit_size, ax, False)
ax.add_artist(mpatches.Ellipse(_idx, width, height, color=qubit_color[var], zorder=1))
if label_qubits:
ax.text(*_idx, s=qubit_labels[var], horizontalalignment='center', verticalalignment='center', color=font_color, size=font_size, weight='bold')
ax.set_xlim([(- 1), (x_max + 1)])
ax.set_ylim([(- (y_max + 1)), 1])
if (not input_axes):
if (get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']):
plt.close(fig)
return fig
return None |
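Stripped of its options, plot_gate_map maps each qubit index to a (row, column) cell from mpl_data, draws coupling-map edges as lines, and places one labelled ellipse per qubit. A matplotlib-only sketch of that drawing loop for the 5-qubit layout (the coupling map below is illustrative, and a fixed marker size in data units stands in for the pixel-accurate _GraphDist helper):

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

# 5-qubit layout from mpl_data[5]; grid[q] = [row, column].
grid = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
cmap = [[0, 2], [1, 2], [2, 3], [2, 4]]      # illustrative undirected edges

fig, ax = plt.subplots(figsize=(5, 5))
ax.axis('off')
for q0, q1 in cmap:                          # edges first, so qubits draw on top
    (y0, x0), (y1, x1) = grid[q0], grid[q1]
    ax.add_artist(plt.Line2D([x0, x1], [-y0, -y1], color='#648fff', linewidth=4, zorder=0))
for q, (y, x) in enumerate(grid):            # one labelled marker per qubit
    ax.add_artist(mpatches.Ellipse((x, -y), 0.5, 0.5, color='#648fff', zorder=1))
    ax.text(x, -y, s=str(q), ha='center', va='center', color='w', weight='bold')
ax.set_xlim(-1, 3)
ax.set_ylim(-3, 1)
plt.show()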
def plot_circuit_layout(circuit, backend, view='virtual'):
"Plot the layout of a circuit transpiled for a given\n target backend.\n\n Args:\n circuit (QuantumCircuit): Input quantum circuit.\n backend (BaseBackend): Target backend.\n view (str): Layout view: either 'virtual' or 'physical'.\n\n Returns:\n Figure: A matplotlib figure showing layout.\n\n Raises:\n QiskitError: Invalid view type given.\n VisualizationError: Circuit has no layout attribute.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit import QuantumCircuit, IBMQ, transpile\n from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout\n from qiskit.tools.monitor import job_monitor\n import matplotlib.pyplot as plt\n %matplotlib inline\n\n IBMQ.load_account()\n\n ghz = QuantumCircuit(3, 3)\n ghz.h(0)\n for idx in range(1,3):\n ghz.cx(0,idx)\n ghz.measure(range(3), range(3))\n\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)\n plot_circuit_layout(new_circ_lv3, backend)\n "
if (circuit._layout is None):
raise QiskitError('Circuit has no layout. Perhaps it has not been transpiled.')
num_qubits = backend.configuration().n_qubits
qubits = []
qubit_labels = ([None] * num_qubits)
if (view == 'virtual'):
for (key, val) in circuit._layout.get_virtual_bits().items():
if (key.register.name != 'ancilla'):
qubits.append(val)
qubit_labels[val] = key.index
elif (view == 'physical'):
for (key, val) in circuit._layout.get_physical_bits().items():
if (val.register.name != 'ancilla'):
qubits.append(key)
qubit_labels[key] = key
else:
raise VisualizationError("Layout view must be 'virtual' or 'physical'.")
qcolors = (['#648fff'] * num_qubits)
for k in qubits:
qcolors[k] = 'k'
cmap = backend.configuration().coupling_map
lcolors = (['#648fff'] * len(cmap))
for (idx, edge) in enumerate(cmap):
if ((edge[0] in qubits) and (edge[1] in qubits)):
lcolors[idx] = 'k'
fig = plot_gate_map(backend, qubit_color=qcolors, qubit_labels=qubit_labels, line_color=lcolors)
return fig | 932,574,443,311,263,400 | Plot the layout of a circuit transpiled for a given
target backend.
Args:
circuit (QuantumCircuit): Input quantum circuit.
backend (BaseBackend): Target backend.
view (str): Layout view: either 'virtual' or 'physical'.
Returns:
Figure: A matplotlib figure showing layout.
Raises:
QiskitError: Invalid view type given.
VisualizationError: Circuit has no layout attribute.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit, IBMQ, transpile
from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
%matplotlib inline
IBMQ.load_account()
ghz = QuantumCircuit(3, 3)
ghz.h(0)
for idx in range(1,3):
ghz.cx(0,idx)
ghz.measure(range(3), range(3))
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)
plot_circuit_layout(new_circ_lv3, backend) | qiskit/visualization/gate_map.py | plot_circuit_layout | AzizNgoueya/qiskit-terra | python | def plot_circuit_layout(circuit, backend, view='virtual'):
"Plot the layout of a circuit transpiled for a given\n target backend.\n\n Args:\n circuit (QuantumCircuit): Input quantum circuit.\n backend (BaseBackend): Target backend.\n view (str): Layout view: either 'virtual' or 'physical'.\n\n Returns:\n Figure: A matplotlib figure showing layout.\n\n Raises:\n QiskitError: Invalid view type given.\n VisualizationError: Circuit has no layout attribute.\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n import numpy as np\n from qiskit import QuantumCircuit, IBMQ, transpile\n from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout\n from qiskit.tools.monitor import job_monitor\n import matplotlib.pyplot as plt\n %matplotlib inline\n\n IBMQ.load_account()\n\n ghz = QuantumCircuit(3, 3)\n ghz.h(0)\n for idx in range(1,3):\n ghz.cx(0,idx)\n ghz.measure(range(3), range(3))\n\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)\n plot_circuit_layout(new_circ_lv3, backend)\n "
if (circuit._layout is None):
raise QiskitError('Circuit has no layout. Perhaps it has not been transpiled.')
num_qubits = backend.configuration().n_qubits
qubits = []
qubit_labels = ([None] * num_qubits)
if (view == 'virtual'):
for (key, val) in circuit._layout.get_virtual_bits().items():
if (key.register.name != 'ancilla'):
qubits.append(val)
qubit_labels[val] = key.index
elif (view == 'physical'):
for (key, val) in circuit._layout.get_physical_bits().items():
if (val.register.name != 'ancilla'):
qubits.append(key)
qubit_labels[key] = key
else:
raise VisualizationError("Layout view must be 'virtual' or 'physical'.")
qcolors = (['#648fff'] * num_qubits)
for k in qubits:
qcolors[k] = 'k'
cmap = backend.configuration().coupling_map
lcolors = (['#648fff'] * len(cmap))
for (idx, edge) in enumerate(cmap):
if ((edge[0] in qubits) and (edge[1] in qubits)):
lcolors[idx] = 'k'
fig = plot_gate_map(backend, qubit_color=qcolors, qubit_labels=qubit_labels, line_color=lcolors)
return fig |
def plot_error_map(backend, figsize=(12, 9), show_title=True):
"Plots the error map of a given backend.\n\n Args:\n backend (IBMQBackend): Given backend.\n figsize (tuple): Figure size in inches.\n show_title (bool): Show the title or not.\n\n Returns:\n Figure: A matplotlib figure showing error map.\n\n Raises:\n VisualizationError: Input is not IBMQ backend.\n ImportError: If seaborn is not installed\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_error_map\n %matplotlib inline\n\n IBMQ.load_account()\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n plot_error_map(backend)\n "
try:
import seaborn as sns
except ImportError:
raise ImportError('Must have seaborn installed to use plot_error_map. To install, run "pip install seaborn".')
if (not HAS_MATPLOTLIB):
raise ImportError('Must have Matplotlib installed. To install, run "pip install matplotlib".')
import matplotlib
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import ticker
color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
num_qubits = config['n_qubits']
single_gate_errors = ([0] * num_qubits)
for gate in props['gates']:
if (gate['gate'] == 'u2'):
_qubit = gate['qubits'][0]
single_gate_errors[_qubit] = gate['parameters'][0]['value']
single_gate_errors = (100 * np.asarray(single_gate_errors))
avg_1q_err = np.mean(single_gate_errors)
single_norm = matplotlib.colors.Normalize(vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [color_map(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
directed = False
line_colors = []
if cmap:
directed = False
if (num_qubits < 20):
for edge in cmap:
if (not ([edge[1], edge[0]] in cmap)):
directed = True
break
cx_errors = []
for line in cmap:
for item in props['gates']:
if (item['qubits'] == line):
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
cx_errors = (100 * np.asarray(cx_errors))
avg_cx_err = np.mean(cx_errors)
cx_norm = matplotlib.colors.Normalize(vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [color_map(cx_norm(err)) for err in cx_errors]
read_err = []
for qubit in range(num_qubits):
for item in props['qubits'][qubit]:
if (item['name'] == 'readout_error'):
read_err.append(item['value'])
read_err = (100 * np.asarray(read_err))
avg_read_err = np.mean(read_err)
max_read_err = np.max(read_err)
fig = plt.figure(figsize=figsize)
gridspec.GridSpec(nrows=2, ncols=3)
grid_spec = gridspec.GridSpec(12, 12, height_ratios=(([1] * 11) + [0.5]), width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])
left_ax = plt.subplot(grid_spec[2:10, :1])
main_ax = plt.subplot(grid_spec[:11, 1:11])
right_ax = plt.subplot(grid_spec[2:10, 11:])
bleft_ax = plt.subplot(grid_spec[(- 1), :5])
if cmap:
bright_ax = plt.subplot(grid_spec[(- 1), 7:])
plot_gate_map(backend, qubit_color=q_colors, line_color=line_colors, qubit_size=28, line_width=5, plot_directed=directed, ax=main_ax)
main_ax.axis('off')
main_ax.set_aspect(1)
if cmap:
single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map, norm=single_norm, orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
single_cb.locator = tick_locator
single_cb.update_ticks()
single_cb.update_ticks()
bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))
if (cmap is None):
bleft_ax.axis('off')
bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))
if cmap:
cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map, norm=cx_norm, orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
cx_cb.locator = tick_locator
cx_cb.update_ticks()
bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))
if (num_qubits < 10):
num_left = num_qubits
num_right = 0
else:
num_left = math.ceil((num_qubits / 2))
num_right = (num_qubits - num_left)
left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
left_ax.set_yticks(range(num_left))
left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
left_ax.invert_yaxis()
left_ax.set_title('Readout Error (%)', fontsize=12)
for spine in left_ax.spines.values():
spine.set_visible(False)
if num_right:
right_ax.barh(range(num_left, num_qubits), read_err[num_left:], align='center', color='#DDBBBA')
right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
right_ax.set_yticks(range(num_left, num_qubits))
right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)], fontsize=12)
right_ax.invert_yaxis()
right_ax.invert_xaxis()
right_ax.yaxis.set_label_position('right')
right_ax.yaxis.tick_right()
right_ax.set_title('Readout Error (%)', fontsize=12)
else:
right_ax.axis('off')
for spine in right_ax.spines.values():
spine.set_visible(False)
if show_title:
fig.suptitle('{name} Error Map'.format(name=backend.name()), fontsize=24, y=0.9)
if (get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']):
plt.close(fig)
return fig | -1,502,234,631,552,956,700 | Plots the error map of a given backend.
Args:
backend (IBMQBackend): Given backend.
figsize (tuple): Figure size in inches.
show_title (bool): Show the title or not.
Returns:
Figure: A matplotlib figure showing error map.
Raises:
VisualizationError: Input is not IBMQ backend.
ImportError: If seaborn is not installed
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_error_map
%matplotlib inline
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
plot_error_map(backend) | qiskit/visualization/gate_map.py | plot_error_map | AzizNgoueya/qiskit-terra | python | def plot_error_map(backend, figsize=(12, 9), show_title=True):
"Plots the error map of a given backend.\n\n Args:\n backend (IBMQBackend): Given backend.\n figsize (tuple): Figure size in inches.\n show_title (bool): Show the title or not.\n\n Returns:\n Figure: A matplotlib figure showing error map.\n\n Raises:\n VisualizationError: Input is not IBMQ backend.\n ImportError: If seaborn is not installed\n\n Example:\n .. jupyter-execute::\n :hide-code:\n :hide-output:\n\n from qiskit.test.ibmq_mock import mock_get_backend\n mock_get_backend('FakeVigo')\n\n .. jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, IBMQ\n from qiskit.visualization import plot_error_map\n %matplotlib inline\n\n IBMQ.load_account()\n provider = IBMQ.get_provider(hub='ibm-q')\n backend = provider.get_backend('ibmq_vigo')\n plot_error_map(backend)\n "
try:
import seaborn as sns
except ImportError:
raise ImportError('Must have seaborn installed to use plot_error_map. To install, run "pip install seaborn".')
if (not HAS_MATPLOTLIB):
raise ImportError('Must have Matplotlib installed. To install, run "pip install matplotlib".')
import matplotlib
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import ticker
color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
num_qubits = config['n_qubits']
single_gate_errors = ([0] * num_qubits)
for gate in props['gates']:
if (gate['gate'] == 'u2'):
_qubit = gate['qubits'][0]
single_gate_errors[_qubit] = gate['parameters'][0]['value']
single_gate_errors = (100 * np.asarray(single_gate_errors))
avg_1q_err = np.mean(single_gate_errors)
single_norm = matplotlib.colors.Normalize(vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [color_map(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
directed = False
line_colors = []
if cmap:
directed = False
if (num_qubits < 20):
for edge in cmap:
if (not ([edge[1], edge[0]] in cmap)):
directed = True
break
cx_errors = []
for line in cmap:
for item in props['gates']:
if (item['qubits'] == line):
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
cx_errors = (100 * np.asarray(cx_errors))
avg_cx_err = np.mean(cx_errors)
cx_norm = matplotlib.colors.Normalize(vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [color_map(cx_norm(err)) for err in cx_errors]
read_err = []
for qubit in range(num_qubits):
for item in props['qubits'][qubit]:
if (item['name'] == 'readout_error'):
read_err.append(item['value'])
read_err = (100 * np.asarray(read_err))
avg_read_err = np.mean(read_err)
max_read_err = np.max(read_err)
fig = plt.figure(figsize=figsize)
gridspec.GridSpec(nrows=2, ncols=3)
grid_spec = gridspec.GridSpec(12, 12, height_ratios=(([1] * 11) + [0.5]), width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])
left_ax = plt.subplot(grid_spec[2:10, :1])
main_ax = plt.subplot(grid_spec[:11, 1:11])
right_ax = plt.subplot(grid_spec[2:10, 11:])
bleft_ax = plt.subplot(grid_spec[(- 1), :5])
if cmap:
bright_ax = plt.subplot(grid_spec[(- 1), 7:])
plot_gate_map(backend, qubit_color=q_colors, line_color=line_colors, qubit_size=28, line_width=5, plot_directed=directed, ax=main_ax)
main_ax.axis('off')
main_ax.set_aspect(1)
if cmap:
single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map, norm=single_norm, orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
single_cb.locator = tick_locator
single_cb.update_ticks()
single_cb.update_ticks()
bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))
if (cmap is None):
bleft_ax.axis('off')
bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))
if cmap:
cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map, norm=cx_norm, orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
cx_cb.locator = tick_locator
cx_cb.update_ticks()
bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))
if (num_qubits < 10):
num_left = num_qubits
num_right = 0
else:
num_left = math.ceil((num_qubits / 2))
num_right = (num_qubits - num_left)
left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
left_ax.set_yticks(range(num_left))
left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
left_ax.invert_yaxis()
left_ax.set_title('Readout Error (%)', fontsize=12)
for spine in left_ax.spines.values():
spine.set_visible(False)
if num_right:
right_ax.barh(range(num_left, num_qubits), read_err[num_left:], align='center', color='#DDBBBA')
right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
right_ax.set_yticks(range(num_left, num_qubits))
right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)], fontsize=12)
right_ax.invert_yaxis()
right_ax.invert_xaxis()
right_ax.yaxis.set_label_position('right')
right_ax.yaxis.tick_right()
right_ax.set_title('Readout Error (%)', fontsize=12)
else:
right_ax.axis('off')
for spine in right_ax.spines.values():
spine.set_visible(False)
if show_title:
fig.suptitle('{name} Error Map'.format(name=backend.name()), fontsize=24, y=0.9)
if (get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']):
plt.close(fig)
return fig |
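The error map relies on matplotlib's Normalize-plus-colormap idiom to turn raw error rates into colors, and on ColorbarBase to draw a standalone legend for them. A minimal sketch of that mapping, independent of any backend (the error rates are made up, and 'viridis' is a stand-in for the seaborn cubehelix palette):

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

errors = np.array([0.05, 0.12, 0.30, 0.90, 1.40])    # per-qubit error rates in %
norm = matplotlib.colors.Normalize(vmin=errors.min(), vmax=errors.max())
cmap = plt.get_cmap('viridis')
colors = [cmap(norm(e)) for e in errors]             # one RGBA color per qubit

fig, ax = plt.subplots(figsize=(5, 0.5))
matplotlib.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='horizontal')
ax.set_title('error rate (%)')
plt.show()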
@property
def dist_real(self):
'Compute distance.\n '
(x0, y0) = self.ax.transAxes.transform((0, 0))
(x1, y1) = self.ax.transAxes.transform((1, 1))
value = ((x1 - x0) if self.x else (y1 - y0))
return value | -7,645,432,826,772,829,000 | Compute distance. | qiskit/visualization/gate_map.py | dist_real | AzizNgoueya/qiskit-terra | python | @property
def dist_real(self):
'\n '
(x0, y0) = self.ax.transAxes.transform((0, 0))
(x1, y1) = self.ax.transAxes.transform((1, 1))
value = ((x1 - x0) if self.x else (y1 - y0))
return value |
@property
def dist_abs(self):
'Distance abs\n '
bounds = (self.ax.get_xlim() if self.x else self.ax.get_ylim())
return (bounds[0] - bounds[1]) | 5,587,799,260,997,822,000 | Distance abs | qiskit/visualization/gate_map.py | dist_abs | AzizNgoueya/qiskit-terra | python | @property
def dist_abs(self):
'\n '
bounds = (self.ax.get_xlim() if self.x else self.ax.get_ylim())
return (bounds[0] - bounds[1]) |
@property
def value(self):
'Return value.\n '
return ((self.size / self.dist_real) * self.dist_abs) | -2,474,716,988,918,986,000 | Return value. | qiskit/visualization/gate_map.py | value | AzizNgoueya/qiskit-terra | python | @property
def value(self):
'\n '
return ((self.size / self.dist_real) * self.dist_abs) |
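Together, these three properties convert a size given in display pixels into data units: dist_real is the axes extent in pixels, dist_abs the extent in data coordinates, and value scales the requested pixel size by their ratio. The same conversion can be computed directly (a sketch; the 24-pixel marker mirrors the qubit_size default above):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(0, 10)

(x0, _) = ax.transAxes.transform((0, 0))   # axes corners in display pixels
(x1, _) = ax.transAxes.transform((1, 1))
pixels = x1 - x0                           # dist_real: axes width in pixels

lo, hi = ax.get_xlim()
data_units = hi - lo                       # dist_abs (sign aside): width in data units

marker_px = 24
marker_data = marker_px / pixels * data_units
print('%d px -> %.3f data units' % (marker_px, marker_data))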
def test_paginate(self):
'Places Pager object in context with size/num from request.'
from moztrap.model.tags.models import Tag
tpl = template.Template('{% load pagination %}{% paginate queryset as pager %}{% for obj in pager.objects %}{{ obj }} {% endfor %}')
request = Mock()
request.GET = {'pagesize': 3, 'pagenumber': 2}
for i in range(1, 7):
self.F.TagFactory.create(name=str(i))
qs = Tag.objects.all()
output = tpl.render(template.Context({'request': request, 'queryset': qs}))
self.assertEqual(output, '4 5 6 ') | 2,482,848,873,757,526,000 | Places Pager object in context with size/num from request. | tests/view/lists/templatetags/test_pagination.py | test_paginate | UCL/moztrap | python | def test_paginate(self):
from moztrap.model.tags.models import Tag
tpl = template.Template('{% load pagination %}{% paginate queryset as pager %}{% for obj in pager.objects %}{{ obj }} {% endfor %}')
request = Mock()
request.GET = {'pagesize': 3, 'pagenumber': 2}
for i in range(1, 7):
self.F.TagFactory.create(name=str(i))
qs = Tag.objects.all()
output = tpl.render(template.Context({'request': request, 'queryset': qs}))
self.assertEqual(output, '4 5 6 ') |
def test_pagenumber_url(self):
'``pagenumber_url`` filter updates pagenumber in URL.'
from moztrap.view.lists.templatetags.pagination import pagenumber_url
request = Mock()
request.get_full_path.return_value = 'http://localhost/?pagenumber=2&pagesize=10'
self.assertEqual(pagenumber_url(request, 1), 'http://localhost/?pagenumber=1&pagesize=10') | 7,664,268,065,005,800,000 | ``pagenumber_url`` filter updates pagenumber in URL. | tests/view/lists/templatetags/test_pagination.py | test_pagenumber_url | UCL/moztrap | python | def test_pagenumber_url(self):
from moztrap.view.lists.templatetags.pagination import pagenumber_url
request = Mock()
request.get_full_path.return_value = 'http://localhost/?pagenumber=2&pagesize=10'
self.assertEqual(pagenumber_url(request, 1), 'http://localhost/?pagenumber=1&pagesize=10') |
def test_pagesize_url(self):
'``pagesize_url`` updates pagesize in URL (and jumps to page 1).'
from moztrap.view.lists.templatetags.pagination import pagesize_url
request = Mock()
request.get_full_path.return_value = 'http://localhost/?pagenumber=2&pagesize=10'
self.assertEqual(pagesize_url(request, 20), 'http://localhost/?pagenumber=1&pagesize=20') | -470,807,238,691,450,940 | ``pagesize_url`` updates pagesize in URL (and jumps to page 1). | tests/view/lists/templatetags/test_pagination.py | test_pagesize_url | UCL/moztrap | python | def test_pagesize_url(self):
from moztrap.view.lists.templatetags.pagination import pagesize_url
request = Mock()
request.get_full_path.return_value = 'http://localhost/?pagenumber=2&pagesize=10'
self.assertEqual(pagesize_url(request, 20), 'http://localhost/?pagenumber=1&pagesize=20') |
def test_pagenumber(self):
'``pagenumber`` gets the pagenumber from the request.'
from moztrap.view.lists.templatetags.pagination import pagenumber
request = Mock()
request.GET = {'pagenumber': 2, 'pagesize': 10}
self.assertEqual(pagenumber(request), 2) | 4,844,378,220,135,801,000 | ``pagenumber`` gets the pagenumber from the request. | tests/view/lists/templatetags/test_pagination.py | test_pagenumber | UCL/moztrap | python | def test_pagenumber(self):
from moztrap.view.lists.templatetags.pagination import pagenumber
request = Mock()
request.GET = {'pagenumber': 2, 'pagesize': 10}
self.assertEqual(pagenumber(request), 2) |
def test_pagesize(self):
'``pagesize`` gets the pagesize from the request.'
from moztrap.view.lists.templatetags.pagination import pagesize
request = Mock()
request.GET = {'pagenumber': 2, 'pagesize': 10}
self.assertEqual(pagesize(request), 10) | 201,397,973,904,035,840 | ``pagesize`` gets the pagesize from the request. | tests/view/lists/templatetags/test_pagination.py | test_pagesize | UCL/moztrap | python | def test_pagesize(self):
from moztrap.view.lists.templatetags.pagination import pagesize
request = Mock()
request.GET = {'pagenumber': 2, 'pagesize': 10}
self.assertEqual(pagesize(request), 10) |
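All four template filters exercised above reduce to one operation: rewrite a single query-string parameter on the request's full path, with pagesize_url also resetting pagenumber to 1. The real moztrap helpers are not shown in these records, so the following standard-library sketch is only a plausible reconstruction of that behavior:

from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def update_querystring(url, **params):
    # Rewrite selected query parameters, leaving the rest of the URL intact.
    scheme, netloc, path, query, frag = urlsplit(url)
    q = {k: v[-1] for k, v in parse_qs(query).items()}
    q.update({k: str(v) for k, v in params.items()})
    return urlunsplit((scheme, netloc, path, urlencode(q), frag))

url = 'http://localhost/?pagenumber=2&pagesize=10'
print(update_querystring(url, pagenumber=1))               # like pagenumber_url
print(update_querystring(url, pagesize=20, pagenumber=1))  # pagesize_url jumps to page 1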
def as_mpl_artists(shape_list, properties_func=None, text_offset=5.0, origin=1):
"\n Converts a region list to a list of patches and a list of artists.\n\n\n Optional Keywords:\n [ text_offset ] - If there is text associated with the regions, add\n some vertical offset (in pixels) to the text so that it doesn't overlap\n with the regions.\n\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the Python convention\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists are shifted by\n (1, 1). If you do not want this shift, set origin=0.\n "
patch_list = []
artist_list = []
if (properties_func is None):
properties_func = properties_func_default
saved_attrs = None
for shape in shape_list:
patches = []
if (saved_attrs is None):
_attrs = ([], {})
else:
_attrs = (copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1]))
kwargs = properties_func(shape, _attrs)
if (shape.name == 'composite'):
saved_attrs = shape.attr
continue
if ((saved_attrs is None) and shape.continued):
saved_attrs = shape.attr
if (not shape.continued):
saved_attrs = None
txt = shape.attr[1].get('text')
if (shape.name == 'polygon'):
xy = np.array(shape.coord_list)
xy.shape = ((- 1), 2)
patches = [mpatches.Polygon((xy - origin), closed=True, **kwargs)]
elif ((shape.name == 'rotbox') or (shape.name == 'box')):
(xc, yc, w, h, rot) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
_box = np.array([[((- w) / 2.0), ((- h) / 2.0)], [((- w) / 2.0), (h / 2.0)], [(w / 2.0), (h / 2.0)], [(w / 2.0), ((- h) / 2.0)]])
box = (_box + [xc, yc])
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif (shape.name == 'ellipse'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
angle = shape.coord_list[(- 1)]
(maj_list, min_list) = (shape.coord_list[2:(- 1):2], shape.coord_list[3:(- 1):2])
patches = [mpatches.Ellipse((xc, yc), (2 * maj), (2 * min), angle=angle, **kwargs) for (maj, min) in zip(maj_list, min_list)]
elif (shape.name == 'annulus'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), (2 * r), (2 * r), **kwargs) for r in r_list]
elif (shape.name == 'circle'):
(xc, yc, major) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Ellipse((xc, yc), (2 * major), (2 * major), angle=0, **kwargs)]
elif (shape.name == 'panda'):
(xc, yc, a1, a2, an, r1, r2, rn) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Arc((xc, yc), (rr * 2), (rr * 2), angle=0, theta1=a1, theta2=a2, **kwargs) for rr in np.linspace(r1, r2, (rn + 1))]
for aa in np.linspace(a1, a2, (an + 1)):
xx = ((np.array([r1, r2]) * np.cos(((aa / 180.0) * np.pi))) + xc)
yy = ((np.array([r1, r2]) * np.sin(((aa / 180.0) * np.pi))) + yc)
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif (shape.name == 'pie'):
(xc, yc, r1, r2, a1, a2) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Arc((xc, yc), (rr * 2), (rr * 2), angle=0, theta1=a1, theta2=a2, **kwargs) for rr in [r1, r2]]
for aa in [a1, a2]:
xx = ((np.array([r1, r2]) * np.cos(((aa / 180.0) * np.pi))) + xc)
yy = ((np.array([r1, r2]) * np.sin(((aa / 180.0) * np.pi))) + yc)
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif (shape.name == 'epanda'):
(xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
(x1, y1) = (cos(((a1 / 180.0) * pi)), ((sin(((a1 / 180.0) * pi)) * r11) / r12))
(x2, y2) = (cos(((a2 / 180.0) * pi)), ((sin(((a2 / 180.0) * pi)) * r11) / r12))
(a1, a2) = (((atan2(y1, x1) / pi) * 180.0), ((atan2(y2, x2) / pi) * 180.0))
patches = [mpatches.Arc((xc, yc), (rr1 * 2), (rr2 * 2), angle=angle, theta1=a1, theta2=a2, **kwargs) for (rr1, rr2) in zip(np.linspace(r11, r21, (rn + 1)), np.linspace(r12, r22, (rn + 1)))]
for aa in np.linspace(a1, a2, (an + 1)):
xx = (np.array([r11, r21]) * np.cos(((aa / 180.0) * np.pi)))
yy = (np.array([r11, r21]) * np.sin(((aa / 180.0) * np.pi)))
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, (r12 / r11)).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif (shape.name == 'text'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif (shape.name == 'point'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
artist_list.append(Line2D([xc], [yc], **kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = 'text'
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset, va='bottom', **textkwargs)
artist_list.append(_t)
elif (shape.name in ['line', 'vector']):
if (shape.name == 'line'):
(x1, y1, x2, y2) = shape.coord_list[:4]
(x1, y1, x2, y2) = ((x1 - origin), (y1 - origin), (x2 - origin), (y2 - origin))
(a1, a2) = shape.attr[1].get('line', '0 0').strip().split()[:2]
arrowstyle = '-'
if int(a1):
arrowstyle = ('<' + arrowstyle)
if int(a2):
arrowstyle = (arrowstyle + '>')
else:
(x1, y1, l, a) = shape.coord_list[:4]
(x1, y1) = ((x1 - origin), (y1 - origin))
(x2, y2) = ((x1 + (l * np.cos(((a / 180.0) * np.pi)))), (y1 + (l * np.sin(((a / 180.0) * np.pi)))))
v1 = int(shape.attr[1].get('vector', '0').strip())
if v1:
arrowstyle = '->'
else:
arrowstyle = '-'
patches = [mpatches.FancyArrowPatch(posA=(x1, y1), posB=(x2, y2), arrowstyle=arrowstyle, arrow_transmuter=None, connectionstyle='arc3', patchA=None, patchB=None, shrinkA=0, shrinkB=0, connector=None, **kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} to mpl artist".format(shape.name))
patch_list.extend(patches)
if (txt and patches):
textshape = copy.copy(shape)
textshape.name = 'text'
textkwargs = properties_func(textshape, _attrs)
_bb = [p.get_window_extent() for p in patches]
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
(x0, y0, x1, y1) = _bbox.extents
xc = (0.5 * (x0 + x1))
_t = _get_text(txt, xc, y1, 0, text_offset, va='bottom', **textkwargs)
artist_list.append(_t)
return (patch_list, artist_list) | -6,207,374,083,399,434,000 | Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the Python convention
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists are shifted by
(1, 1). If you do not want this shift, set origin=0. | pyregion/mpl_helper.py | as_mpl_artists | keflavich/pyregion | python | def as_mpl_artists(shape_list, properties_func=None, text_offset=5.0, origin=1):
"\n Converts a region list to a list of patches and a list of artists.\n\n\n Optional Keywords:\n [ text_offset ] - If there is text associated with the regions, add\n some vertical offset (in pixels) to the text so that it doesn't overlap\n with the regions.\n\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the Python convention\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists are shifted by\n (1, 1). If you do not want this shift, set origin=0.\n "
patch_list = []
artist_list = []
if (properties_func is None):
properties_func = properties_func_default
saved_attrs = None
for shape in shape_list:
patches = []
if (saved_attrs is None):
_attrs = ([], {})
else:
_attrs = (copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1]))
kwargs = properties_func(shape, _attrs)
if (shape.name == 'composite'):
saved_attrs = shape.attr
continue
if ((saved_attrs is None) and shape.continued):
saved_attrs = shape.attr
if (not shape.continued):
saved_attrs = None
txt = shape.attr[1].get('text')
if (shape.name == 'polygon'):
xy = np.array(shape.coord_list)
xy.shape = ((- 1), 2)
patches = [mpatches.Polygon((xy - origin), closed=True, **kwargs)]
elif ((shape.name == 'rotbox') or (shape.name == 'box')):
(xc, yc, w, h, rot) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
_box = np.array([[((- w) / 2.0), ((- h) / 2.0)], [((- w) / 2.0), (h / 2.0)], [(w / 2.0), (h / 2.0)], [(w / 2.0), ((- h) / 2.0)]])
box = (_box + [xc, yc])
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif (shape.name == 'ellipse'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
angle = shape.coord_list[(- 1)]
(maj_list, min_list) = (shape.coord_list[2:(- 1):2], shape.coord_list[3:(- 1):2])
patches = [mpatches.Ellipse((xc, yc), (2 * maj), (2 * min), angle=angle, **kwargs) for (maj, min) in zip(maj_list, min_list)]
elif (shape.name == 'annulus'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), (2 * r), (2 * r), **kwargs) for r in r_list]
elif (shape.name == 'circle'):
(xc, yc, major) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Ellipse((xc, yc), (2 * major), (2 * major), angle=0, **kwargs)]
elif (shape.name == 'panda'):
(xc, yc, a1, a2, an, r1, r2, rn) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Arc((xc, yc), (rr * 2), (rr * 2), angle=0, theta1=a1, theta2=a2, **kwargs) for rr in np.linspace(r1, r2, (rn + 1))]
for aa in np.linspace(a1, a2, (an + 1)):
xx = ((np.array([r1, r2]) * np.cos(((aa / 180.0) * np.pi))) + xc)
yy = ((np.array([r1, r2]) * np.sin(((aa / 180.0) * np.pi))) + yc)
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif (shape.name == 'pie'):
(xc, yc, r1, r2, a1, a2) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
patches = [mpatches.Arc((xc, yc), (rr * 2), (rr * 2), angle=0, theta1=a1, theta2=a2, **kwargs) for rr in [r1, r2]]
for aa in [a1, a2]:
xx = ((np.array([r1, r2]) * np.cos(((aa / 180.0) * np.pi))) + xc)
yy = ((np.array([r1, r2]) * np.sin(((aa / 180.0) * np.pi))) + yc)
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif (shape.name == 'epanda'):
(xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle) = shape.coord_list
(xc, yc) = ((xc - origin), (yc - origin))
(x1, y1) = (cos(((a1 / 180.0) * pi)), ((sin(((a1 / 180.0) * pi)) * r11) / r12))
(x2, y2) = (cos(((a2 / 180.0) * pi)), ((sin(((a2 / 180.0) * pi)) * r11) / r12))
(a1, a2) = (((atan2(y1, x1) / pi) * 180.0), ((atan2(y2, x2) / pi) * 180.0))
patches = [mpatches.Arc((xc, yc), (rr1 * 2), (rr2 * 2), angle=angle, theta1=a1, theta2=a2, **kwargs) for (rr1, rr2) in zip(np.linspace(r11, r21, (rn + 1)), np.linspace(r12, r22, (rn + 1)))]
for aa in np.linspace(a1, a2, (an + 1)):
xx = (np.array([r11, r21]) * np.cos(((aa / 180.0) * np.pi)))
yy = (np.array([r11, r21]) * np.sin(((aa / 180.0) * np.pi)))
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, (r12 / r11)).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif (shape.name == 'text'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif (shape.name == 'point'):
(xc, yc) = shape.coord_list[:2]
(xc, yc) = ((xc - origin), (yc - origin))
artist_list.append(Line2D([xc], [yc], **kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = 'text'
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset, va='bottom', **textkwargs)
artist_list.append(_t)
elif (shape.name in ['line', 'vector']):
if (shape.name == 'line'):
(x1, y1, x2, y2) = shape.coord_list[:4]
(x1, y1, x2, y2) = ((x1 - origin), (y1 - origin), (x2 - origin), (y2 - origin))
(a1, a2) = shape.attr[1].get('line', '0 0').strip().split()[:2]
arrowstyle = '-'
if int(a1):
arrowstyle = ('<' + arrowstyle)
if int(a2):
arrowstyle = (arrowstyle + '>')
else:
(x1, y1, l, a) = shape.coord_list[:4]
(x1, y1) = ((x1 - origin), (y1 - origin))
(x2, y2) = ((x1 + (l * np.cos(((a / 180.0) * np.pi)))), (y1 + (l * np.sin(((a / 180.0) * np.pi)))))
v1 = int(shape.attr[1].get('vector', '0').strip())
if v1:
arrowstyle = '->'
else:
arrowstyle = '-'
patches = [mpatches.FancyArrowPatch(posA=(x1, y1), posB=(x2, y2), arrowstyle=arrowstyle, arrow_transmuter=None, connectionstyle='arc3', patchA=None, patchB=None, shrinkA=0, shrinkB=0, connector=None, **kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} to mpl artist".format(shape.name))
patch_list.extend(patches)
if (txt and patches):
textshape = copy.copy(shape)
textshape.name = 'text'
textkwargs = properties_func(textshape, _attrs)
_bb = [p.get_window_extent() for p in patches]
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
(x0, y0, x1, y1) = _bbox.extents
xc = (0.5 * (x0 + x1))
_t = _get_text(txt, xc, y1, 0, text_offset, va='bottom', **textkwargs)
artist_list.append(_t)
return (patch_list, artist_list) |
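A typical use of this converter is to parse a DS9 region description with pyregion and add the resulting patches and text artists to a matplotlib axes. A hedged sketch (the region string is invented, and get_mpl_patches_texts, the ShapeList convenience wrapper around as_mpl_artists, may vary between pyregion versions):

import matplotlib.pyplot as plt
import pyregion

region_text = 'image\ncircle(50,50,20) # color=red text={source A}'
shape_list = pyregion.parse(region_text)

fig, ax = plt.subplots()
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
patch_list, artist_list = shape_list.get_mpl_patches_texts()
for p in patch_list:
    ax.add_patch(p)          # region outlines
for t in artist_list:
    ax.add_artist(t)         # associated text labels
plt.show()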
def __init__(self, i_prev, i_next, feeGrowthOutside):
'\n :type i_prev: int\n :type i_next: int\n '
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0 | 318,246,960,711,850,600 | :type i_prev: int
:type i_next: int | python/scfmm/__init__.py | __init__ | serokell/segmented-cfmm | python | def __init__(self, i_prev, i_next, feeGrowthOutside):
'\n :type i_prev: int\n :type i_next: int\n '
self.i_prev = i_prev
self.i_next = i_next
self.Delta_L = 0
self.feeGrowthOutside = feeGrowthOutside
self.n_positions = 0 |
@staticmethod
def tick(srp):
'\n Computes the closest tick index below a certain price, given its square root\n :param srp: square root of a price\n :return: the closest tick below a certain price\n '
if (srp == infinity):
return infinity
else:
return math.floor((math.log(srp) / math.log(math.sqrt(1.0001)))) | -7,826,974,968,341,321,000 | Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the closest tick below a certain price | python/scfmm/__init__.py | tick | serokell/segmented-cfmm | python | @staticmethod
def tick(srp):
'\n Computes the closest tick index below a certain price, given its square root\n :param srp: square root of a price\n :return: the closest tick below a certain price\n '
if (srp == infinity):
return infinity
else:
return math.floor((math.log(srp) / math.log(math.sqrt(1.0001)))) |
@staticmethod
def srp(tick):
'\n Computes the square root of the price corresponding to a given tick\n :param tick: the index of a tick\n :return: the corresponding square root price\n '
if (tick == infinity):
return infinity
return math.pow(math.sqrt(1.0001), tick) | 2,717,632,398,661,134,000 | Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price | python/scfmm/__init__.py | srp | serokell/segmented-cfmm | python | @staticmethod
def srp(tick):
'\n Computes the square root of the price corresponding to a given tick\n :param tick: the index of a tick\n :return: the corresponding square root price\n '
if (tick == infinity):
return infinity
return math.pow(math.sqrt(1.0001), tick) |
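A standalone sketch of the tick/sqrt-price mapping implemented by the two static methods above; the functions below are illustrative re-implementations (the originals live on a class and special-case infinity), not the dataset's own code.

import math

def tick_below(srp_value):
    # Closest tick at or below the price whose square root is srp_value.
    return math.floor(math.log(srp_value) / math.log(math.sqrt(1.0001)))

def sqrt_price(tick_index):
    # Square root of the price at tick_index: sqrt(1.0001) ** tick_index.
    return math.pow(math.sqrt(1.0001), tick_index)

print(tick_below(sqrt_price(1234)))             # 1234, up to float rounding
print(tick_below(sqrt_price(1234) * 1.000001))  # still 1234: below tick 1235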
def initialize_tick(self, i, i_l):
'\n Initialize a new tick at index i, provide the index of an initialized tick lower\n than i to find it easily in the linked list. Assumes that i is *not* already initialized.\n :param i:\n :param i_l:\n '
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if (i_next > i):
self.ticks[i_l].i_next = i
self.ticks[i] = Tick(i_l, i_next, (self.feeGrowth if (self.i_a >= i) else XY()))
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next) | -489,302,767,904,517,700 | Initialize a new tick at index i, provide the index of an initialized tick lower
than i to find it easily in the linked list. Assumes that i is *not* already initialized.
:param i:
:param i_l: | python/scfmm/__init__.py | initialize_tick | serokell/segmented-cfmm | python | def initialize_tick(self, i, i_l):
'\n Initialize a new tick at index i, provide the index of an initialized tick lower\n than i to find it easily in the linked list. Assumes that i is *not* already initialized.\n :param i:\n :param i_l:\n '
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if (i_next > i):
self.ticks[i_l].i_next = i
self.ticks[i] = Tick(i_l, i_next, (self.feeGrowth if (self.i_a >= i) else XY()))
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next) |
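A toy illustration of the linked-list walk performed by initialize_tick above: ticks are kept in a dict keyed by index, and a new index i is spliced in by scanning right from a known lower tick i_l. The field names are simplified stand-ins for the dataset's Tick objects.

INF = float('inf')

def insert_tick(ticks, i, i_l):
    assert i not in ticks and i_l < i
    i_next = ticks[i_l]['i_next']
    if i_next > i:
        ticks[i_l]['i_next'] = i                      # splice i between i_l and i_next
        ticks[i] = {'i_prev': i_l, 'i_next': i_next}
        ticks[i_next]['i_prev'] = i
    else:
        insert_tick(ticks, i, i_next)                 # keep walking right

# Sentinel ticks at +/- infinity keep the walk total, as in the original.
ticks = {-INF: {'i_prev': -INF, 'i_next': INF},
         INF: {'i_prev': -INF, 'i_next': INF}}
insert_tick(ticks, 10, -INF)
insert_tick(ticks, 5, -INF)
print(ticks[5]['i_next'])  # 10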
def __getattribute__(self, attr):
"Prevent 'private' attribute access"
if (attr in ('make_rdm1s', 'spin_square', 'contract_2e', 'absorb_h1e')):
raise AttributeError
else:
return object.__getattribute__(self, attr) | -7,457,239,160,112,903,000 | Prevent 'private' attribute access | pyscf/mcscf/test/test_addons.py | __getattribute__ | JFurness1/pyscf | python | def __getattribute__(self, attr):
if (attr in ('make_rdm1s', 'spin_square', 'contract_2e', 'absorb_h1e')):
raise AttributeError
else:
return object.__getattribute__(self, attr) |
def get_first_UTC():
'\n Returns the first UTC recorded by Molonglo after the disk crash in October 2017\n '
return '2017-10-31-08:49:32' | -5,102,652,466,926,968,000 | Returns the first UTC recorded by Molonglo after the disk crash in October 2017 | helpers.py | get_first_UTC | vg2691994/mock_frb_injection_results | python | def get_first_UTC():
'\n \n '
return '2017-10-31-08:49:32' |
def schema_exists(self):
'\n \n Checks the estimated number of tuples in the subjects table to determine if data exists\n \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('select reltuples FROM pg_class where relname = %s', ('subjects',))
result = cursor.fetchone()[0]
return (result > 0) | -7,340,034,216,028,805,000 | Checks the estimated number of tuples in the subjects table to determine if data exists
:return: | database_query_handler.py | schema_exists | jdwinkler/dbpedia_service | python | def schema_exists(self):
'\n \n Checks the estimated number of tuples in the subjects table to determine if data exists\n \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('select reltuples FROM pg_class where relname = %s', ('subjects',))
result = cursor.fetchone()[0]
return (result > 0) |
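Note that pg_class.reltuples is a planner estimate, so the check above can lag until ANALYZE has run. A sketch of an exact (slower) variant, assuming the same psycopg2-style connection object:

def schema_has_data(connection):
    # Exact existence check: stops at the first row instead of counting.
    with connection.cursor() as cursor:
        cursor.execute('SELECT EXISTS (SELECT 1 FROM subjects LIMIT 1)')
        return cursor.fetchone()[0]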
def build_table_schema(self, schema_name, schema_file_path):
'\n \n Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is\n dropped (deleted) and recreated.\n \n :param schema_name: \n :param schema_file_path: \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute(('DROP SCHEMA IF EXISTS %s CASCADE' % schema_name))
schema_file = open(schema_file_path, 'rU').read()
cursor.execute(schema_file) | 172,376,289,086,650,050 | Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is
dropped (deleted) and recreated.
:param schema_name:
:param schema_file_path:
:return: | database_query_handler.py | build_table_schema | jdwinkler/dbpedia_service | python | def build_table_schema(self, schema_name, schema_file_path):
'\n \n Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is\n dropped (deleted) and recreated.\n \n :param schema_name: \n :param schema_file_path: \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute(('DROP SCHEMA IF EXISTS %s CASCADE' % schema_name))
schema_file = open(schema_file_path, 'rU').read()
cursor.execute(schema_file) |
def build_indices(self):
'\n \n Builds the following indices:\n \n Index on name for subjects\n Index on predicate for predicate_object\n Index on subject_id for predicate object\n \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_subject_id_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.subject_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_predicate_idx')
cursor.execute('create index subject_idx on dbpedia.subjects (name)')
cursor.execute('create index pv_subject_id_idx on dbpedia.predicate_object (subject_id)')
cursor.execute('create index pv_predicate_idx on dbpedia.predicate_object (predicate);') | 138,920,001,608,513,780 | Builds the following indices:
Index on name for subjects
Index on predicate for predicate_object
Index on subject_id for predicate object
:return: | database_query_handler.py | build_indices | jdwinkler/dbpedia_service | python | def build_indices(self):
'\n \n Builds the following indices:\n \n Index on name for subjects\n Index on predicate for predicate_object\n Index on subject_id for predicate object\n \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_subject_id_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.subject_idx')
cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_predicate_idx')
cursor.execute('create index subject_idx on dbpedia.subjects (name)')
cursor.execute('create index pv_subject_id_idx on dbpedia.predicate_object (subject_id)')
cursor.execute('create index pv_predicate_idx on dbpedia.predicate_object (predicate);') |
def insert_spo_tuple(self, spo_tuple):
'\n \n Handles the insertion of spo tuples into the db. Workflow:\n \n Attempt to find the subject table entry corresponding to your subject. If found, use that ID for\n inserting your po values. Otherwise, insert your subject into the subject table and use that ID\n instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.\n \n :param spo_tuple: \n :return: \n '
(subject, predicate, db_object) = spo_tuple
with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute('select subject_id from dbpedia.subjects where name = %s', (subject,))
results = cursor.fetchone()
if ((results is None) or (len(results) == 0)):
cursor.execute('INSERT INTO dbpedia.subjects (name) VALUES (%s) returning subject_id', (subject,))
results = cursor.fetchone()
id = results['subject_id']
cursor.execute('INSERT INTO dbpedia.predicate_object (subject_id, predicate, object) VALUES (%s, %s, %s)', (id, predicate, db_object)) | -8,441,923,402,302,579,000 | Handles the insertion of spo tuples into the db. Workflow:
Attempt to find the subject table entry corresponding to your subject. If found, use that ID for
inserting your po values. Otherwise, insert your subject into the subject table and use that ID
instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.
:param spo_tuple:
:return: | database_query_handler.py | insert_spo_tuple | jdwinkler/dbpedia_service | python | def insert_spo_tuple(self, spo_tuple):
'\n \n Handles the insertion of spo tuples into the db. Workflow:\n \n Attempt to find the subject table entry corresponding to your subject. If found, use that ID for\n inserting your po values. Otherwise, insert your subject into the subject table and use that ID\n instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.\n \n :param spo_tuple: \n :return: \n '
(subject, predicate, db_object) = spo_tuple
with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute('select subject_id from dbpedia.subjects where name = %s', (subject,))
results = cursor.fetchone()
if ((results is None) or (len(results) == 0)):
cursor.execute('INSERT INTO dbpedia.subjects (name) VALUES (%s) returning subject_id', (subject,))
results = cursor.fetchone()
id = results['subject_id']
cursor.execute('INSERT INTO dbpedia.predicate_object (subject_id, predicate, object) VALUES (%s, %s, %s)', (id, predicate, db_object)) |
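A runnable sketch of the subject-lookup-then-insert pattern used by insert_spo_tuple, adapted to the stdlib sqlite3 module so it runs without Postgres; the two tables below are simplified stand-ins for the dbpedia schema.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript('''
    CREATE TABLE subjects (subject_id INTEGER PRIMARY KEY, name TEXT UNIQUE);
    CREATE TABLE predicate_object (subject_id INTEGER, predicate TEXT, object TEXT);
''')

def insert_spo(conn, spo):
    subject, predicate, obj = spo
    row = conn.execute('SELECT subject_id FROM subjects WHERE name = ?', (subject,)).fetchone()
    if row is None:
        # Subject seen for the first time: create it and keep its new id.
        subject_id = conn.execute('INSERT INTO subjects (name) VALUES (?)', (subject,)).lastrowid
    else:
        subject_id = row[0]
    conn.execute('INSERT INTO predicate_object VALUES (?, ?, ?)', (subject_id, predicate, obj))

insert_spo(conn, ('Alan_Turing', 'birthPlace', 'Maida_Vale,_London'))
insert_spo(conn, ('Alan_Turing', 'deathPlace', 'Wilmslow'))
print(conn.execute('SELECT COUNT(*) FROM predicate_object').fetchone()[0])  # 2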
def get_person_metadata(self, person_name, use_exact_match=False):
'\n \n Returns all metadata associated with the provided person_name. However, does not actually check\n to see if the identifier corresponds to a person or not; the class of the identifier will\n be included in the returned metadata though. DBPedia People only contains people predicate\n types as well.\n \n Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided\n to query against the subject table (WHERE = identifier). If False, uses the LIKE operator\n to attempt to find similar IDs that are not exactly the same. Results will still be a superset\n of the use_exact_match = True case.\n \n :param person_name: \n :param use_exact_match:\n :return: \n '
person_name = person_name.replace(' ', '_').upper()
with self.connection.cursor() as cursor:
if (not use_exact_match):
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) LIKE %s', ((('%%' + person_name) + '%%'),))
else:
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) = %s', (person_name,))
results = cursor.fetchall()
if (results is None):
return []
subject_id_list = [x[0] for x in results]
cursor.execute('select dbpedia.subjects.name, predicate, object FROM dbpedia.predicate_object INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) WHERE dbpedia.predicate_object.subject_id = ANY(%s)', (subject_id_list,))
return sorted(cursor.fetchall(), key=(lambda x: x[0])) | -1,959,901,038,615,787,000 | Returns all metadata associated with the provided person_name. However, does not actually check
to see if the identifier corresponds to a person or not; the class of the identifier will
be included in the returned metadata though. DBPedia People only contains people predicate
types as well.
Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided
to query against the subject table (WHERE = identifier). If False, uses the LIKE operator
to attempt to find similar IDs that are not exactly the same. Results will still be a superset
of the use_exact_match = True case.
:param person_name:
:param use_exact_match:
:return: | database_query_handler.py | get_person_metadata | jdwinkler/dbpedia_service | python | def get_person_metadata(self, person_name, use_exact_match=False):
'\n \n Returns all metadata associated with the provided person_name. However, does not actually check\n to see if the identifier corresponds to a person or not; the class of the identifier will\n be included in the returned metadata though. DBPedia People only contains people predicate\n types as well.\n \n Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided\n to query against the subject table (WHERE = identifier). If False, uses the LIKE operator\n to attempt to find similar IDs that are not exactly the same. Results will still be a superset\n of the use_exact_match = True case.\n \n :param person_name: \n :param use_exact_match:\n :return: \n '
person_name = person_name.replace(' ', '_').upper()
with self.connection.cursor() as cursor:
if (not use_exact_match):
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) LIKE %s', ((('%%' + person_name) + '%%'),))
else:
cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) = %s', (person_name,))
results = cursor.fetchall()
if (results is None):
return []
subject_id_list = [x[0] for x in results]
cursor.execute('select dbpedia.subjects.name, predicate, object FROM dbpedia.predicate_object INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) WHERE dbpedia.predicate_object.subject_id = ANY(%s)', (subject_id_list,))
return sorted(cursor.fetchall(), key=(lambda x: x[0])) |
def get_tuples_by_predicate(self, predicate_of_interest):
'\n \n Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since\n you are querying such a large fraction of the po table at once (unless your predicate does not exist).\n \n Predicates:\n \n Name\n Type\n Gender\n Description\n Birthdate\n GivenName\n Surname\n BirthPlace\n DeathDate\n DeathPlace\n \n :param predicate_of_interest: \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('select dbpedia.subjects.name, predicate, object FROM dbpedia.predicate_object INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) WHERE upper(dbpedia.predicate_object.predicate) = upper(%s)', (predicate_of_interest,))
results = cursor.fetchall()
if (results is None):
return []
else:
return results | -4,161,906,530,598,888,400 | Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since
you are querying such a large fraction of the po table at once (unless your predicate does not exist).
Predicates:
Name
Type
Gender
Description
Birthdate
GivenName
Surname
BirthPlace
DeathDate
DeathPlace
:param predicate_of_interest:
:return: | database_query_handler.py | get_tuples_by_predicate | jdwinkler/dbpedia_service | python | def get_tuples_by_predicate(self, predicate_of_interest):
'\n \n Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since\n you are querying such a large fraction of the po table at once (unless your predicate does not exist).\n \n Predicates:\n \n Name\n Type\n Gender\n Description\n Birthdate\n GivenName\n Surname\n BirthPlace\n DeathDate\n DeathPlace\n \n :param predicate_of_interest: \n :return: \n '
with self.connection.cursor() as cursor:
cursor.execute('select dbpedia.subjects.name, predicate, object FROM dbpedia.predicate_object INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) WHERE upper(dbpedia.predicate_object.predicate) = upper(%s)', (predicate_of_interest,))
results = cursor.fetchall()
if (results is None):
return []
else:
return results |
def registry():
'Map reserved names and scopes to their conversion functions.'
reg = {'Placeholder': _placeholder, 'Const': _constant, 'Conv2D': _conv2d, 'Relu': _relu, 'Sigmoid': _sigmoid, 'MatMul': _matmul, 'Shape': _shape, 'StridedSlice': _strided_slice, 'Add': _add, 'Sub': _sub, 'Transpose': _transpose, 'Reshape': _reshape, 'Pack': _pack, 'Rsqrt': _rsqrt, 'Mul': _mul, 'ExpandDims': _expand_dims, 'AvgPool': _avgpool, 'Squeeze': _squeeze, 'ConcatV2': _concat, 'BiasAdd': _bias_add, 'MaxPool': _maxpool, 'Pad': _pad, 'BatchToSpaceND': _batch_to_space_nd, 'SpaceToBatchND': _space_to_batch_nd, 'ArgMax': _argmax, 'required_space_to_batch_paddings': _required_space_to_batch_paddings, 'flatten': _flatten, 'conv2d': _keras_conv2d, 'Slice': _slice, 'Neg': _negative, 'Split': _split, 'Identity': _identity, 'GatherV2': _gather, 'dense': _keras_dense}
return reg | 9,216,679,404,523,882,000 | Map reserved names and scopes to their conversion functions. | tf_encrypted/convert/register.py | registry | capeprivacy/tf-encrypted | python | def registry():
reg = {'Placeholder': _placeholder, 'Const': _constant, 'Conv2D': _conv2d, 'Relu': _relu, 'Sigmoid': _sigmoid, 'MatMul': _matmul, 'Shape': _shape, 'StridedSlice': _strided_slice, 'Add': _add, 'Sub': _sub, 'Transpose': _transpose, 'Reshape': _reshape, 'Pack': _pack, 'Rsqrt': _rsqrt, 'Mul': _mul, 'ExpandDims': _expand_dims, 'AvgPool': _avgpool, 'Squeeze': _squeeze, 'ConcatV2': _concat, 'BiasAdd': _bias_add, 'MaxPool': _maxpool, 'Pad': _pad, 'BatchToSpaceND': _batch_to_space_nd, 'SpaceToBatchND': _space_to_batch_nd, 'ArgMax': _argmax, 'required_space_to_batch_paddings': _required_space_to_batch_paddings, 'flatten': _flatten, 'conv2d': _keras_conv2d, 'Slice': _slice, 'Neg': _negative, 'Split': _split, 'Identity': _identity, 'GatherV2': _gather, 'dense': _keras_dense}
return reg |
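A toy version of the dispatch pattern the registry above enables: op names map to conversion functions, and unknown ops fail loudly. The node/converter arguments here are plain stand-ins, not tf-encrypted's real classes.

def _placeholder_demo(converter, node, inputs):
    return 'placeholder(%s)' % node

def _relu_demo(converter, node, inputs):
    return 'relu(%s)' % inputs[0]

DEMO_REGISTRY = {'Placeholder': _placeholder_demo, 'Relu': _relu_demo}

def convert_node(converter, op_type, node, inputs):
    try:
        return DEMO_REGISTRY[op_type](converter, node, inputs)
    except KeyError:
        raise ValueError('no conversion registered for op %r' % op_type)

print(convert_node(None, 'Relu', 'n0', ['x']))  # relu(x)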
def _nodef_to_public_pond(converter, x):
'Map a NodeDef x to a PublicPondTensor.'
dtype = x.attr['dtype'].type
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (not x_shape):
if (dtype == tf.float32):
nums = x.attr['value'].tensor.float_val
elif (dtype == tf.float64):
nums = x.attr['value'].tensor.double_val
elif (dtype == tf.int32):
nums = x.attr['value'].tensor.int_val
else:
raise TypeError('Unsupported dtype')
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError('Unsupported dtype')
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_public = converter.protocol.define_public_input(converter.model_provider, inputter_fn)
return x_public | -8,910,634,418,428,920,000 | Map a NodeDef x to a PublicPondTensor. | tf_encrypted/convert/register.py | _nodef_to_public_pond | capeprivacy/tf-encrypted | python | def _nodef_to_public_pond(converter, x):
dtype = x.attr['dtype'].type
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (not x_shape):
if (dtype == tf.float32):
nums = x.attr['value'].tensor.float_val
elif (dtype == tf.float64):
nums = x.attr['value'].tensor.double_val
elif (dtype == tf.int32):
nums = x.attr['value'].tensor.int_val
else:
raise TypeError('Unsupported dtype')
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError('Unsupported dtype')
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_public = converter.protocol.define_public_input(converter.model_provider, inputter_fn)
return x_public |
def _nodef_to_private_pond(converter, x):
'Map a NodeDef x to a PrivatePondTensor.'
dtype = x.attr['dtype'].type
warn_msg = 'Unexpected dtype {} found at node {}'
err_msg = 'Unsupported dtype {} found at node {}'
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (not x_shape):
if (dtype == tf.float32):
nums = x.attr['value'].tensor.float_val
elif (dtype == tf.float64):
nums = x.attr['value'].tensor.double_val
elif (dtype == tf.int32):
logging.warning(warn_msg, dtype, x.name)
nums = x.attr['value'].tensor.int_val
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
logging.warning(warn_msg, dtype, x.name)
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_private = converter.protocol.define_private_input(converter.model_provider, inputter_fn)
return x_private | 447,014,265,472,516,900 | Map a NodeDef x to a PrivatePondTensor. | tf_encrypted/convert/register.py | _nodef_to_private_pond | capeprivacy/tf-encrypted | python | def _nodef_to_private_pond(converter, x):
dtype = x.attr['dtype'].type
warn_msg = 'Unexpected dtype {} found at node {}'
err_msg = 'Unsupported dtype {} found at node {}'
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (not x_shape):
if (dtype == tf.float32):
nums = x.attr['value'].tensor.float_val
elif (dtype == tf.float64):
nums = x.attr['value'].tensor.double_val
elif (dtype == tf.int32):
logging.warning(warn_msg, dtype, x.name)
nums = x.attr['value'].tensor.int_val
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
logging.warning(warn_msg, dtype, x.name)
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_private = converter.protocol.define_private_input(converter.model_provider, inputter_fn)
return x_private |
def _nodef_to_numpy_array(x):
'Map a NodeDef x to a np.array.'
dtype = x.attr['dtype'].type
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError('Unsupported dtype')
return np.array(nums).reshape(x_shape) | 8,830,999,278,678,323,000 | Map a NodeDef x to a np.array. | tf_encrypted/convert/register.py | _nodef_to_numpy_array | capeprivacy/tf-encrypted | python | def _nodef_to_numpy_array(x):
dtype = x.attr['dtype'].type
x_shape = [i.size for i in x.attr['value'].tensor.tensor_shape.dim]
if (dtype == tf.float32):
nums = array.array('f', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.float64):
nums = array.array('d', x.attr['value'].tensor.tensor_content)
elif (dtype == tf.int32):
nums = array.array('i', x.attr['value'].tensor.tensor_content)
else:
raise TypeError('Unsupported dtype')
return np.array(nums).reshape(x_shape) |
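A standalone sketch of the byte-decoding trick shared by the three _nodef_to_* helpers above: a tensor's raw bytes (tensor_content in the NodeDef) are unpacked with array.array by typecode ('f' for float32, 'd' for float64, 'i' for int32). No TensorFlow is needed to demonstrate it.

import array
import numpy as np

raw = np.arange(6, dtype=np.float32).tobytes()  # stands in for tensor_content
nums = array.array('f', raw)                    # decode raw float32 bytes
x = np.array(nums).reshape(2, 3)
print(x)
# [[0. 1. 2.]
#  [3. 4. 5.]]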
def create_debug_adaptor(self):
'Create the Visual Studio Code debug adaptor'
self.assertTrue(os.path.exists(self.lldbVSCodeExec), 'lldb-vscode must exist')
log_file_path = self.getBuildArtifact('vscode.txt')
self.vscode = vscode.DebugAdaptor(executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(), log_file=log_file_path) | -3,664,724,858,036,987,400 | Create the Visual Studio Code debug adaptor | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | create_debug_adaptor | Diatrus/llvm-project | python | def create_debug_adaptor(self):
self.assertTrue(os.path.exists(self.lldbVSCodeExec), 'lldb-vscode must exist')
log_file_path = self.getBuildArtifact('vscode.txt')
self.vscode = vscode.DebugAdaptor(executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(), log_file=log_file_path) |
def set_source_breakpoints(self, source_path, lines, condition=None, hitCondition=None):
'Sets source breakpoints and returns an array of strings containing\n the breakpoint IDs ("1", "2") for each breakpoint that was set.\n '
response = self.vscode.request_setBreakpoints(source_path, lines, condition=condition, hitCondition=hitCondition)
if (response is None):
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append(('%i' % breakpoint['id']))
return breakpoint_ids | -5,090,111,554,572,923,000 | Sets source breakpoints and returns an array of strings containing
the breakpoint IDs ("1", "2") for each breakpoint that was set. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | set_source_breakpoints | Diatrus/llvm-project | python | def set_source_breakpoints(self, source_path, lines, condition=None, hitCondition=None):
'Sets source breakpoints and returns an array of strings containing\n the breakpoint IDs ("1", "2") for each breakpoint that was set.\n '
response = self.vscode.request_setBreakpoints(source_path, lines, condition=condition, hitCondition=hitCondition)
if (response is None):
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append(('%i' % breakpoint['id']))
return breakpoint_ids |
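A toy version of the response handling in set_source_breakpoints above: pull string breakpoint IDs out of a DAP-style setBreakpoints response. The response dict is a hand-written stand-in for what the adaptor returns.

response = {'body': {'breakpoints': [{'id': 1, 'verified': True},
                                     {'id': 2, 'verified': True}]}}
breakpoint_ids = ['%i' % bp['id'] for bp in response['body']['breakpoints']]
print(breakpoint_ids)  # ['1', '2']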
def set_function_breakpoints(self, functions, condition=None, hitCondition=None):
'Sets breakpoints by function name given an array of function names\n and returns an array of strings containing the breakpoint IDs\n ("1", "2") for each breakpoint that was set.\n '
response = self.vscode.request_setFunctionBreakpoints(functions, condition=condition, hitCondition=hitCondition)
if (response is None):
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append(('%i' % breakpoint['id']))
return breakpoint_ids | -6,898,889,011,605,928,000 | Sets breakpoints by function name given an array of function names
and returns an array of strings containing the breakpoint IDs
("1", "2") for each breakpoint that was set. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | set_function_breakpoints | Diatrus/llvm-project | python | def set_function_breakpoints(self, functions, condition=None, hitCondition=None):
'Sets breakpoints by function name given an array of function names\n and returns an array of strings containing the breakpoint IDs\n ("1", "2") for each breakpoint that was set.\n '
response = self.vscode.request_setFunctionBreakpoints(functions, condition=condition, hitCondition=hitCondition)
if (response is None):
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
breakpoint_ids.append(('%i' % breakpoint['id']))
return breakpoint_ids |
def verify_breakpoint_hit(self, breakpoint_ids):
'Wait for the process we are debugging to stop, and verify we hit\n any breakpoint location in the "breakpoint_ids" array.\n "breakpoint_ids" should be a list of breakpoint ID strings\n (["1", "2"]). The return value from self.set_source_breakpoints()\n or self.set_function_breakpoints() can be passed to this function'
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if ('body' in stopped_event):
body = stopped_event['body']
if ('reason' not in body):
continue
if (body['reason'] != 'breakpoint'):
continue
if ('description' not in body):
continue
description = body['description']
print(('description: %s' % description))
for breakpoint_id in breakpoint_ids:
match_desc = ('breakpoint %s.' % breakpoint_id)
if (match_desc in description):
return
self.assertTrue(False, 'breakpoint not hit') | -6,815,606,521,215,621,000 | Wait for the process we are debugging to stop, and verify we hit
any breakpoint location in the "breakpoint_ids" array.
"breakpoint_ids" should be a list of breakpoint ID strings
(["1", "2"]). The return value from self.set_source_breakpoints()
or self.set_function_breakpoints() can be passed to this function | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | verify_breakpoint_hit | Diatrus/llvm-project | python | def verify_breakpoint_hit(self, breakpoint_ids):
'Wait for the process we are debugging to stop, and verify we hit\n any breakpoint location in the "breakpoint_ids" array.\n "breakpoint_ids" should be a list of breakpoint ID strings\n (["1", "2"]). The return value from self.set_source_breakpoints()\n or self.set_function_breakpoints() can be passed to this function'
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if ('body' in stopped_event):
body = stopped_event['body']
if ('reason' not in body):
continue
if (body['reason'] != 'breakpoint'):
continue
if ('description' not in body):
continue
description = body['description']
print(('description: %s' % description))
for breakpoint_id in breakpoint_ids:
match_desc = ('breakpoint %s.' % breakpoint_id)
if (match_desc in description):
return
self.assertTrue(False, 'breakpoint not hit') |
def verify_exception_breakpoint_hit(self, filter_label):
"Wait for the process we are debugging to stop, and verify the stop\n reason is 'exception' and that the description matches\n 'filter_label'\n "
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if ('body' in stopped_event):
body = stopped_event['body']
if ('reason' not in body):
continue
if (body['reason'] != 'exception'):
continue
if ('description' not in body):
continue
description = body['description']
if (filter_label == description):
return True
return False | 77,465,806,993,255,650 | Wait for the process we are debugging to stop, and verify the stop
reason is 'exception' and that the description matches
'filter_label' | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | verify_exception_breakpoint_hit | Diatrus/llvm-project | python | def verify_exception_breakpoint_hit(self, filter_label):
"Wait for the process we are debugging to stop, and verify the stop\n reason is 'exception' and that the description matches\n 'filter_label'\n "
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if ('body' in stopped_event):
body = stopped_event['body']
if ('reason' not in body):
continue
if (body['reason'] != 'exception'):
continue
if ('description' not in body):
continue
description = body['description']
if (filter_label == description):
return True
return False |
def get_dict_value(self, d, key_path):
'Verify each key in the key_path array is contained in each\n        dictionary within "d". Assert if any key isn\'t in the\n        corresponding dictionary. This is handy for grabbing values from VS\n        Code response dictionary like getting\n        response[\'body\'][\'stackFrames\']\n        '
value = d
for key in key_path:
if (key in value):
value = value[key]
else:
self.assertTrue((key in value), ('key "%s" from key_path "%s" not in "%s"' % (key, key_path, d)))
return value | -4,796,444,409,052,997,000 | Verify each key in the key_path array is contained in each
dictionary within "d". Assert if any key isn't in the
corresponding dictionary. This is handy for grabbing values from VS
Code response dictionary like getting
response['body']['stackFrames'] | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | get_dict_value | Diatrus/llvm-project | python | def get_dict_value(self, d, key_path):
'Verify each key in the key_path array is contained in each\n        dictionary within "d". Assert if any key isn\'t in the\n        corresponding dictionary. This is handy for grabbing values from VS\n        Code response dictionary like getting\n        response[\'body\'][\'stackFrames\']\n        '
value = d
for key in key_path:
if (key in value):
value = value[key]
else:
self.assertTrue((key in value), ('key "%s" from key_path "%s" not in "%s"' % (key, key_path, d)))
return value |
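The same key-path walk as get_dict_value above, outside the test class, drilling into a nested VS Code-style response dict (the dict contents are illustrative):

response = {'body': {'stackFrames': [{'id': 1, 'name': 'main'}]}}
value = response
for key in ('body', 'stackFrames'):
    assert key in value, 'key %r missing' % key
    value = value[key]
print(value[0]['name'])  # main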
def set_local(self, name, value, id=None):
'Set a top level local variable only.'
return self.vscode.request_setVariable(1, name, str(value), id=id) | -5,406,429,082,478,088,000 | Set a top level local variable only. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | set_local | Diatrus/llvm-project | python | def set_local(self, name, value, id=None):
return self.vscode.request_setVariable(1, name, str(value), id=id) |
def set_global(self, name, value, id=None):
'Set a top level global variable only.'
return self.vscode.request_setVariable(2, name, str(value), id=id) | 1,311,405,924,274,155,000 | Set a top level global variable only. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | set_global | Diatrus/llvm-project | python | def set_global(self, name, value, id=None):
return self.vscode.request_setVariable(2, name, str(value), id=id) |
def attach(self, program=None, pid=None, waitFor=None, trace=None, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, attachCommands=None, coreFile=None):
'Build the default Makefile target, create the VSCode debug adaptor,\n and attach to the process.\n '
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
self.addTearDownHook(cleanup)
self.vscode.request_initialize()
response = self.vscode.request_attach(program=program, pid=pid, waitFor=waitFor, trace=trace, initCommands=initCommands, preRunCommands=preRunCommands, stopCommands=stopCommands, exitCommands=exitCommands, attachCommands=attachCommands, coreFile=coreFile)
if (not (response and response['success'])):
self.assertTrue(response['success'], ('attach failed (%s)' % response['message'])) | -5,515,005,714,230,878,000 | Build the default Makefile target, create the VSCode debug adaptor,
and attach to the process. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | attach | Diatrus/llvm-project | python | def attach(self, program=None, pid=None, waitFor=None, trace=None, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, attachCommands=None, coreFile=None):
'Build the default Makefile target, create the VSCode debug adaptor,\n and attach to the process.\n '
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
self.addTearDownHook(cleanup)
self.vscode.request_initialize()
response = self.vscode.request_attach(program=program, pid=pid, waitFor=waitFor, trace=trace, initCommands=initCommands, preRunCommands=preRunCommands, stopCommands=stopCommands, exitCommands=exitCommands, attachCommands=attachCommands, coreFile=coreFile)
if (not (response and response['success'])):
self.assertTrue(response['success'], ('attach failed (%s)' % response['message'])) |
def launch(self, program=None, args=None, cwd=None, env=None, stopOnEntry=False, disableASLR=True, disableSTDIO=False, shellExpandArguments=False, trace=False, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, sourcePath=None, debuggerRoot=None, launchCommands=None, sourceMap=None):
'Sending launch request to vscode\n '
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
self.addTearDownHook(cleanup)
self.vscode.request_initialize()
response = self.vscode.request_launch(program, args=args, cwd=cwd, env=env, stopOnEntry=stopOnEntry, disableASLR=disableASLR, disableSTDIO=disableSTDIO, shellExpandArguments=shellExpandArguments, trace=trace, initCommands=initCommands, preRunCommands=preRunCommands, stopCommands=stopCommands, exitCommands=exitCommands, sourcePath=sourcePath, debuggerRoot=debuggerRoot, launchCommands=launchCommands, sourceMap=sourceMap)
if (not (response and response['success'])):
self.assertTrue(response['success'], ('launch failed (%s)' % response['message'])) | 7,559,166,695,298,282,000 | Sending launch request to vscode | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | launch | Diatrus/llvm-project | python | def launch(self, program=None, args=None, cwd=None, env=None, stopOnEntry=False, disableASLR=True, disableSTDIO=False, shellExpandArguments=False, trace=False, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, sourcePath=None, debuggerRoot=None, launchCommands=None, sourceMap=None):
'\n '
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
self.addTearDownHook(cleanup)
self.vscode.request_initialize()
response = self.vscode.request_launch(program, args=args, cwd=cwd, env=env, stopOnEntry=stopOnEntry, disableASLR=disableASLR, disableSTDIO=disableSTDIO, shellExpandArguments=shellExpandArguments, trace=trace, initCommands=initCommands, preRunCommands=preRunCommands, stopCommands=stopCommands, exitCommands=exitCommands, sourcePath=sourcePath, debuggerRoot=debuggerRoot, launchCommands=launchCommands, sourceMap=sourceMap)
if (not (response and response['success'])):
self.assertTrue(response['success'], ('launch failed (%s)' % response['message'])) |
def build_and_launch(self, program, args=None, cwd=None, env=None, stopOnEntry=False, disableASLR=True, disableSTDIO=False, shellExpandArguments=False, trace=False, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, sourcePath=None, debuggerRoot=None):
'Build the default Makefile target, create the VSCode debug adaptor,\n and launch the process.\n '
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR, disableSTDIO, shellExpandArguments, trace, initCommands, preRunCommands, stopCommands, exitCommands, sourcePath, debuggerRoot) | -9,165,731,098,034,198,000 | Build the default Makefile target, create the VSCode debug adaptor,
and launch the process. | lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | build_and_launch | Diatrus/llvm-project | python | def build_and_launch(self, program, args=None, cwd=None, env=None, stopOnEntry=False, disableASLR=True, disableSTDIO=False, shellExpandArguments=False, trace=False, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, sourcePath=None, debuggerRoot=None):
'Build the default Makefile target, create the VSCode debug adaptor,\n and launch the process.\n '
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR, disableSTDIO, shellExpandArguments, trace, initCommands, preRunCommands, stopCommands, exitCommands, sourcePath, debuggerRoot) |
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random rotation of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Rotated Numpy image tensor.\n "
theta = np.deg2rad(np.random.uniform((- rg), rg))
rotation_matrix = np.array([[np.cos(theta), (- np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x | -6,406,237,366,066,074,000 | Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor. | keras/preprocessing/image.py | random_rotation | HangJie720/keras | python | def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random rotation of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Rotated Numpy image tensor.\n "
theta = np.deg2rad(np.random.uniform((- rg), rg))
rotation_matrix = np.array([[np.cos(theta), (- np.sin(theta)), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x |
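A minimal sketch of the affine pipeline random_rotation relies on: build the 3x3 rotation matrix shown above, then recentre it on the image midpoint. The centre-offset convention (h/2 + 0.5) is an assumption about transform_matrix_offset_center, which is defined elsewhere in this module and not shown in this excerpt.

import numpy as np

theta = np.deg2rad(30.0)
rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                     [np.sin(theta),  np.cos(theta), 0],
                     [0,              0,             1]])

h, w = 32, 32
o_x, o_y = float(h) / 2 + 0.5, float(w) / 2 + 0.5
offset = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform = offset @ rotation @ reset   # rotate about the image centre
print(transform.round(3))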
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial shift of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Shifted Numpy image tensor.\n "
(h, w) = (x.shape[row_axis], x.shape[col_axis])
tx = (np.random.uniform((- hrg), hrg) * h)
ty = (np.random.uniform((- wrg), wrg) * w)
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x | 1,415,840,483,480,128,800 | Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor. | keras/preprocessing/image.py | random_shift | HangJie720/keras | python | def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial shift of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Shifted Numpy image tensor.\n "
(h, w) = (x.shape[row_axis], x.shape[col_axis])
tx = (np.random.uniform((- hrg), hrg) * h)
ty = (np.random.uniform((- wrg), wrg) * w)
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x |
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial shear of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Sheared Numpy image tensor.\n "
shear = np.deg2rad(np.random.uniform((- intensity), intensity))
shear_matrix = np.array([[1, (- np.sin(shear)), 0], [0, np.cos(shear), 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x | -5,416,318,567,962,142,000 | Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor. | keras/preprocessing/image.py | random_shear | HangJie720/keras | python | def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial shear of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Sheared Numpy image tensor.\n "
shear = np.deg2rad(np.random.uniform((- intensity), intensity))
shear_matrix = np.array([[1, (- np.sin(shear)), 0], [0, np.cos(shear), 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x |
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial zoom of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n zoom_range: Tuple of floats; zoom range for width and height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Zoomed Numpy image tensor.\n\n # Raises\n ValueError: if `zoom_range` isn't a tuple.\n "
if (len(zoom_range) != 2):
raise ValueError('`zoom_range` should be a tuple or list of two floats. Received: ', zoom_range)
if ((zoom_range[0] == 1) and (zoom_range[1] == 1)):
(zx, zy) = (1, 1)
else:
(zx, zy) = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x | -1,170,494,675,024,863,500 | Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple. | keras/preprocessing/image.py | random_zoom | HangJie720/keras | python | def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0.0):
"Performs a random spatial zoom of a Numpy image tensor.\n\n # Arguments\n x: Input tensor. Must be 3D.\n zoom_range: Tuple of floats; zoom range for width and height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n\n # Returns\n Zoomed Numpy image tensor.\n\n # Raises\n ValueError: if `zoom_range` isn't a tuple.\n "
if (len(zoom_range) != 2):
raise ValueError('`zoom_range` should be a tuple or list of two floats. Received: ', zoom_range)
if ((zoom_range[0] == 1) and (zoom_range[1] == 1)):
(zx, zy) = (1, 1)
else:
(zx, zy) = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
(h, w) = (x.shape[row_axis], x.shape[col_axis])
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x |
def random_channel_shift(x, intensity, channel_axis=0):
'Performs a random channel shift.\n\n # Arguments\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n # Returns\n Numpy image tensor.\n\n '
x = np.rollaxis(x, channel_axis, 0)
(min_x, max_x) = (np.min(x), np.max(x))
channel_images = [np.clip((x_channel + np.random.uniform((- intensity), intensity)), min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, (channel_axis + 1))
return x | 6,769,834,749,985,244,000 | Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor. | keras/preprocessing/image.py | random_channel_shift | HangJie720/keras | python | def random_channel_shift(x, intensity, channel_axis=0):
'Performs a random channel shift.\n\n # Arguments\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n # Returns\n Numpy image tensor.\n\n '
x = np.rollaxis(x, channel_axis, 0)
(min_x, max_x) = (np.min(x), np.max(x))
channel_images = [np.clip((x_channel + np.random.uniform((- intensity), intensity)), min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, (channel_axis + 1))
return x |
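A runnable sketch of the channel-shift core above: each channel gets its own random offset, then values are clipped back into the image's original range (the toy image shape is arbitrary).

import numpy as np

x = np.random.rand(3, 4, 4)   # channels-first toy image
intensity = 0.2
min_x, max_x = x.min(), x.max()
shifted = np.stack([np.clip(ch + np.random.uniform(-intensity, intensity), min_x, max_x)
                    for ch in x], axis=0)
print(shifted.shape)  # (3, 4, 4)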
def random_brightness(x, brightness_range):
"Performs a random brightness shift.\n\n # Arguments\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n channel_axis: Index of axis for channels in the input tensor.\n\n # Returns\n Numpy image tensor.\n\n # Raises\n ValueError if `brightness_range` isn't a tuple.\n\n "
if (len(brightness_range) != 2):
raise ValueError(('`brightness_range should be tuple or list of two floats. Received: %s' % brightness_range))
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x | 1,916,011,930,223,731,200 | Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple. | keras/preprocessing/image.py | random_brightness | HangJie720/keras | python | def random_brightness(x, brightness_range):
"Performs a random brightness shift.\n\n # Arguments\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n channel_axis: Index of axis for channels in the input tensor.\n\n # Returns\n Numpy image tensor.\n\n # Raises\n ValueError if `brightness_range` isn't a tuple.\n\n "
if (len(brightness_range) != 2):
raise ValueError(('`brightness_range should be tuple or list of two floats. Received: %s' % brightness_range))
x = array_to_img(x)
imgenhancer_Brightness = ImageEnhance.Brightness(x)
u = np.random.uniform(brightness_range[0], brightness_range[1])
x = imgenhancer_Brightness.enhance(u)
x = img_to_array(x)
return x |
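A runnable sketch of the brightness shift above using PIL directly, skipping Keras' array_to_img/img_to_array round trip; the brightness_range value is arbitrary.

import numpy as np
from PIL import Image, ImageEnhance

img = Image.fromarray(np.full((8, 8, 3), 128, dtype=np.uint8))
u = np.random.uniform(0.5, 1.5)              # brightness_range = (0.5, 1.5)
brightened = ImageEnhance.Brightness(img).enhance(u)
print(np.asarray(brightened).mean())         # roughly 128 * u, clipped to [0, 255]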