max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
testing/conftest.py | davidszotten/pdbpp | 0 | 4300 | import functools
import sys
from contextlib import contextmanager
import pytest
_orig_trace = None
def pytest_configure():
global _orig_trace
_orig_trace = sys.gettrace()
@pytest.fixture(scope="session", autouse=True)
def term():
"""Configure TERM for predictable output from Pygments."""
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
m.setenv("TERM", "xterm-256color")
yield m
m.undo()
# if _orig_trace and not hasattr(sys, "pypy_version_info"):
# Fails with PyPy2 (https://travis-ci.org/antocuni/pdb/jobs/509624590)?!
@pytest.fixture(autouse=True)
def restore_settrace(monkeypatch):
"""(Re)store sys.gettrace after test run.
This is required to re-enable coverage tracking.
"""
assert sys.gettrace() is _orig_trace
orig_settrace = sys.settrace
# Wrap sys.settrace to restore original tracing function (coverage)
# with `sys.settrace(None)`.
def settrace(func):
if func is None:
orig_settrace(_orig_trace)
else:
orig_settrace(func)
monkeypatch.setattr("sys.settrace", settrace)
yield
newtrace = sys.gettrace()
if newtrace is not _orig_trace:
sys.settrace(_orig_trace)
assert newtrace is None
@pytest.fixture(scope="session")
def _tmphome_path(tmpdir_factory):
return tmpdir_factory.mktemp("tmphome")
@pytest.fixture(autouse=sys.version_info < (3, 6))
def tmphome(request, monkeypatch):
"""Set up HOME in a temporary directory.
This ensures any real ~/.pdbrc.py is ignored, and also seems to be
required with linecache on py27, which would otherwise read contents from
~/.pdbrc.
"""
# Use tmpdir from testdir, if it is used.
if "testdir" in request.fixturenames:
tmpdir = request.getfixturevalue("testdir").tmpdir
else:
tmpdir = request.getfixturevalue("_tmphome_path")
monkeypatch.setenv("HOME", str(tmpdir))
monkeypatch.setenv("USERPROFILE", str(tmpdir))
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture(params=("pyrepl", "readline"), scope="session")
def readline_param(request):
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
if request.param == "pyrepl":
try:
import pyrepl.readline # noqa: F401
except ImportError as exc:
pytest.skip(msg="pyrepl not available: {}".format(exc))
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", True)
else:
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", False)
return request.param
@pytest.fixture
def monkeypatch_readline(request, monkeypatch, readline_param):
"""Patch readline to return given results."""
def inner(line, begidx, endidx):
if readline_param == "pyrepl":
readline = "pyrepl.readline"
else:
assert readline_param == "readline"
readline = "readline"
monkeypatch.setattr("%s.get_line_buffer" % readline, lambda: line)
monkeypatch.setattr("%s.get_begidx" % readline, lambda: begidx)
monkeypatch.setattr("%s.get_endidx" % readline, lambda: endidx)
return inner
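# Illustrative usage (added sketch, not part of the original conftest): a test
# requests the fixture and calls it with the simulated readline buffer plus the
# begin/end indices of the word being completed, e.g.
#
#     def test_complete_sys(monkeypatch_readline):
#         monkeypatch_readline("sys.g", 4, 5)
#         # ... invoke the completion code under test here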
@pytest.fixture
def monkeypatch_pdb_methods(monkeypatch):
def mock(method, *args, **kwargs):
print("=== %s(%s, %s)" % (method, args, kwargs))
for mock_method in ("set_trace", "set_continue"):
monkeypatch.setattr(
"pdb.pdb.Pdb.%s" % mock_method, functools.partial(mock, mock_method)
)
@pytest.fixture
def monkeypatch_importerror(monkeypatch):
@contextmanager
def cm(mocked_imports):
orig_import = __import__
def import_mock(name, *args):
if name in mocked_imports:
raise ImportError
return orig_import(name, *args)
with monkeypatch.context() as m:
if sys.version_info >= (3,):
m.setattr('builtins.__import__', import_mock)
else:
m.setattr('__builtin__.__import__', import_mock)
yield m
return cm
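# Illustrative usage (added sketch, not part of the original conftest): the
# returned context manager makes selected imports fail, e.g.
#
#     def test_without_pygments(monkeypatch_importerror):
#         with monkeypatch_importerror(("pygments",)):
#             pass  # code under test now sees ImportError for "pygments"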
| 2.078125 | 2 |
thing_gym_ros/envs/utils.py | utiasSTARS/thing-gym-ros | 1 | 4301 |
""" Various generic env utilties. """
def center_crop_img(img, crop_zoom):
""" crop_zoom is amount to "zoom" into the image. E.g. 2.0 would cut out half of the width,
half of the height, and only give the center. """
raw_height, raw_width = img.shape[:2]
center = raw_height // 2, raw_width // 2
crop_size = raw_height // crop_zoom, raw_width // crop_zoom
min_y, max_y = int(center[0] - crop_size[0] // 2), int(center[0] + crop_size[0] // 2)
min_x, max_x = int(center[1] - crop_size[1] // 2), int(center[1] + crop_size[1] // 2)
img_cropped = img[min_y:max_y, min_x:max_x]
return img_cropped
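# Illustrative example (added sketch, not part of the original module),
# assuming a numpy image of shape (480, 640, 3): with crop_zoom=2.0 the
# central half of each dimension is kept.
#
#     import numpy as np
#     img = np.zeros((480, 640, 3), dtype=np.uint8)
#     center_crop_img(img, 2.0).shape[:2]   # -> (240, 320)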
def crop_img(img, relative_corners):
""" relative_corners are floats between 0 and 1 designating where the corners of a crop box
should be ([[top_left_x, top_left_y], [bottom_right_x, bottom_right_y]]).
e.g. [[0, 0], [1, 1]] would be the full image, [[0.5, 0.5], [1, 1]] would be bottom right."""
rc = relative_corners
raw_height, raw_width = img.shape[:2]
top_left_pix = [int(rc[0][0] * raw_width), int(rc[0][1] * raw_height)]
bottom_right_pix = [int(rc[1][0] * raw_width), int(rc[1][1] * raw_height)]
img_cropped = img[top_left_pix[1]:bottom_right_pix[1], top_left_pix[0]:bottom_right_pix[0]]
return img_cropped | 2.96875 | 3 |
tests/sentry/utils/http/tests.py | arya-s/sentry | 1 | 4302 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry import options
from sentry.models import Project
from sentry.testutils import TestCase
from sentry.utils.http import (
is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip,
)
class AbsoluteUriTest(TestCase):
def test_without_path(self):
assert absolute_uri() == options.get('system.url-prefix')
def test_with_path(self):
assert absolute_uri('/foo/bar') == '%s/foo/bar' % (options.get('system.url-prefix'),)
class SameDomainTestCase(TestCase):
def test_is_same_domain(self):
url1 = 'http://example.com/foo/bar'
url2 = 'http://example.com/biz/baz'
self.assertTrue(is_same_domain(url1, url2))
def test_is_same_domain_diff_scheme(self):
url1 = 'https://example.com/foo/bar'
url2 = 'http://example.com/biz/baz'
self.assertTrue(is_same_domain(url1, url2))
def test_is_same_domain_diff_port(self):
url1 = 'http://example.com:80/foo/bar'
url2 = 'http://example.com:13/biz/baz'
self.assertFalse(is_same_domain(url1, url2))
class GetOriginsTestCase(TestCase):
def test_project_default(self):
project = Project.objects.get()
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(project)
self.assertEquals(result, frozenset(['*']))
def test_project(self):
project = Project.objects.get()
project.update_option('sentry:origins', [u'http://foo.example'])
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(project)
self.assertEquals(result, frozenset(['http://foo.example']))
def test_project_and_setting(self):
project = Project.objects.get()
project.update_option('sentry:origins', [u'http://foo.example'])
with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'):
result = get_origins(project)
self.assertEquals(result, frozenset(['http://foo.example', 'http://example.com']))
def test_setting_empty(self):
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(None)
self.assertEquals(result, frozenset([]))
def test_setting_all(self):
with self.settings(SENTRY_ALLOW_ORIGIN='*'):
result = get_origins(None)
self.assertEquals(result, frozenset(['*']))
def test_setting_uri(self):
with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'):
result = get_origins(None)
self.assertEquals(result, frozenset(['http://example.com']))
class IsValidOriginTestCase(TestCase):
@fixture
def project(self):
return mock.Mock()
def isValidOrigin(self, origin, inputs):
with mock.patch('sentry.utils.http.get_origins') as get_origins:
get_origins.return_value = inputs
result = is_valid_origin(origin, self.project)
get_origins.assert_called_once_with(self.project)
return result
def test_global_wildcard_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['*'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_domain_with_port(self):
result = self.isValidOrigin('http://example.com:80', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_subdomain(self):
result = self.isValidOrigin('http://foo.example.com', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_subdomain_with_port(self):
result = self.isValidOrigin('http://foo.example.com:80', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_does_not_match_others(self):
result = self.isValidOrigin('http://foo.com', ['*.example.com'])
self.assertEquals(result, False)
def test_domain_wildcard_matches_domain_with_path(self):
result = self.isValidOrigin('http://foo.example.com/foo/bar', ['*.example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_path(self):
result = self.isValidOrigin('http://example.com/foo/bar', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_explicit_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com:80'])
assert result is True
def test_base_domain_does_not_match_domain_with_invalid_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com:443'])
assert result is False
def test_base_domain_does_not_match_subdomain(self):
result = self.isValidOrigin('http://example.com', ['foo.example.com'])
self.assertEquals(result, False)
def test_full_uri_match(self):
result = self.isValidOrigin('http://example.com', ['http://example.com'])
self.assertEquals(result, True)
def test_full_uri_match_requires_scheme(self):
result = self.isValidOrigin('https://example.com', ['http://example.com'])
self.assertEquals(result, False)
def test_full_uri_match_does_not_require_port(self):
result = self.isValidOrigin('http://example.com:80', ['http://example.com'])
self.assertEquals(result, True)
def test_partial_uri_match(self):
result = self.isValidOrigin('http://example.com/foo/bar', ['http://example.com'])
self.assertEquals(result, True)
def test_null_valid_with_global(self):
result = self.isValidOrigin('null', ['*'])
self.assertEquals(result, True)
def test_null_invalid_graceful_with_domains(self):
result = self.isValidOrigin('null', ['http://example.com'])
self.assertEquals(result, False)
def test_custom_protocol_with_location(self):
result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://custom-thing'])
assert result is True
result = self.isValidOrigin('sp://custom-thing-two/foo/bar', ['sp://custom-thing'])
assert result is False
def test_custom_protocol_without_location(self):
result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://*'])
assert result is True
result = self.isValidOrigin('dp://custom-thing/foo/bar', ['sp://'])
assert result is False
def test_custom_protocol_with_domainish_match(self):
result = self.isValidOrigin('sp://custom-thing.foobar/foo/bar', ['sp://*.foobar'])
assert result is True
result = self.isValidOrigin('sp://custom-thing.bizbaz/foo/bar', ['sp://*.foobar'])
assert result is False
def test_unicode(self):
result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.l\xf8calhost'])
assert result is True
def test_punycode(self):
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.l\xf8calhost'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin('http://l\xc3\xb8calhost', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'l\xf8calhost'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a:80', [u'l\xf8calhost:80'])
assert result is True
def test_unparseable_uri(self):
result = self.isValidOrigin('http://example.com', ['.'])
assert result is False
class IsValidIPTestCase(TestCase):
def is_valid_ip(self, ip, inputs):
self.project.update_option('sentry:blacklisted_ips', inputs)
return is_valid_ip(ip, self.project)
def test_not_in_blacklist(self):
assert self.is_valid_ip('127.0.0.1', [])
assert self.is_valid_ip('127.0.0.1', ['0.0.0.0', '192.168.1.1', '10.0.0.0/8'])
def test_match_blacklist(self):
assert not self.is_valid_ip('127.0.0.1', ['127.0.0.1'])
assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.1', '192.168.1.1'])
def test_match_blacklist_range(self):
assert not self.is_valid_ip('127.0.0.1', ['127.0.0.0/8'])
assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.0/8', '192.168.1.0/8'])
| 2.265625 | 2 |
comcenterproject/project/helpers.py | tongpa/bantak_program | 0 | 4303 |
# -*- coding: utf-8 -*-
"""WebHelpers used in project."""
#from webhelpers import date, feedgenerator, html, number, misc, text
from markupsafe import Markup
def bold(text):
return Markup('<strong>%s</strong>' % text) | 1.757813 | 2 |
Thesis/load/runRiakLoads.py | arnaudsjs/YCSB-1 | 0 | 4304 | import sys;
from Thesis.load.loadBenchmark import runLoadBenchmarkAsBatch;
from Thesis.cluster.RiakCluster import RiakCluster;
NORMAL_BINDING = 'riak';
CONSISTENCY_BINDING = 'riak_consistency';
IPS_IN_CLUSTER = ['172.16.33.14', '172.16.33.15', '172.16.33.16', '172.16.33.17', '172.16.33.18'];
def main():
if len(sys.argv) < 7:
printUsageAndExit();
pathToWorkloadFile = sys.argv[1];
dirToWriteResultTo = sys.argv[2];
runtimeBenchmarkInMinutes = int(sys.argv[3]);
listOfOpsPerSec = sys.argv[4].split(',');
listOfAmountThreads = sys.argv[5].split(',');
listOfAmountOfMachines = sys.argv[6].split(',');
if len(sys.argv) >= 8:
remoteYcsbNodes = sys.argv[7].split(',');
else:
remoteYcsbNodes = [];
cluster = RiakCluster(NORMAL_BINDING, CONSISTENCY_BINDING, IPS_IN_CLUSTER);
runLoadBenchmarkAsBatch(cluster, remoteYcsbNodes, pathToWorkloadFile,
runtimeBenchmarkInMinutes, dirToWriteResultTo,
listOfOpsPerSec, listOfAmountThreads, listOfAmountOfMachines);
def printUsageAndExit():
print 'usage: binary <path workload file> <result dir> <runtime benchmark> <list of #ops> <list of #threads> <list of #machines> [<list remote ycsb nodes>]';
exit();
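# Example invocation (illustrative only; values reuse the paths and IP already
# used below in this script):
#
#     python runRiakLoads.py /root/YCSB/workloads/workload_load /root/YCSB/loads/riak 3 1000,2000 1,2 1,2 172.16.33.10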
cluster = RiakCluster(NORMAL_BINDING, CONSISTENCY_BINDING, IPS_IN_CLUSTER);
runLoadBenchmarkAsBatch(cluster, ['172.16.33.10'], '/root/YCSB/workloads/workload_load',
3, '/root/YCSB/loads/riak',
['1000000000'], ['1'], ['1']);
# main(); | 2.0625 | 2 |
auto_nag/tests/test_round_robin.py | Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd | 0 | 4305 | # coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from mock import patch
from auto_nag.people import People
from auto_nag.round_robin import BadFallback, RoundRobin
class TestRoundRobin(unittest.TestCase):
config = {
'doc': 'The triagers need to have a \'Fallback\' entry.',
'triagers': {
'A B': {'bzmail': '<EMAIL>'},
'C D': {'bzmail': '<EMAIL>'},
'E F': {'bzmail': '<EMAIL>'},
'Fallback': {'bzmail': '<EMAIL>'},
},
'components': {'P1::C1': 'default', 'P2::C2': 'default', 'P3::C3': 'special'},
'default': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'A B',
'2019-02-28': 'C D',
'2019-03-07': 'E F',
},
'special': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'E F',
'2019-02-28': 'A B',
'2019-03-07': 'C D',
},
}
people = People(
[
{
'mail': '<EMAIL>',
'cn': 'G H',
'ismanager': 'FALSE',
'title': 'nothing',
}
]
)
def mk_bug(self, pc):
p, c = pc.split('::')
return {
'product': p,
'component': c,
'triage_owner': '<EMAIL>',
'triage_owner_detail': {'nick': 'ij'},
}
@staticmethod
def _get_nick(x, bzmail):
return bzmail.split('@')[0]
def test_get(self):
with patch.object(RoundRobin, 'get_nick', new=TestRoundRobin._get_nick):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-17') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-17') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-17') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-24') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-24') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-24') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-28') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-28') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-28') == (
'<EMAIL>',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-05') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-05') == (
'<EMAIL>',
'ef',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-05') == (
'<EMAIL>',
'cd',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-08') == (
'<EMAIL>',
'gh',
)
assert rr.get(self.mk_bug('Foo::Bar'), '2019-03-01') == (
'<EMAIL>',
'ij',
)
def test_get_who_to_nag(self):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get_who_to_nag('2019-02-25') == {}
assert rr.get_who_to_nag('2019-02-28') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-05') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-07') == {'<EMAIL>': ['']}
assert rr.get_who_to_nag('2019-03-10') == {'<EMAIL>': ['']}
with patch.object(RoundRobin, 'is_mozilla', return_value=False):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
self.assertRaises(BadFallback, rr.get_who_to_nag, '2019-03-01')
| 2.234375 | 2 |
scipy/weave/inline_tools.py | tacaswell/scipy | 1 | 4306 | # should re-write compiled functions to take a local and global dict
# as input.
from __future__ import absolute_import, print_function
import sys
import os
from . import ext_tools
from . import catalog
from . import common_info
from numpy.core.multiarray import _get_ndarray_c_version
ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),)
# not an easy way for the user_path_list to come in here.
# the PYTHONCOMPILED environment variable offers the most hope.
function_catalog = catalog.catalog()
class inline_ext_function(ext_tools.ext_function):
# Some specialization is needed for inline extension functions
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py__locals = NULL;\n' \
'PyObject *py__globals = NULL;\n'
py_objects = ', '.join(self.arg_specs.py_pointers())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
else:
declare_py_objects = ''
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_vars:
init_values = py_vars + ' = NULL;\n\n'
else:
init_values = ''
parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\
'&py__locals,'\
'&py__globals))\n'\
' return NULL;\n'
return declare_return + declare_py_objects + \
init_values + parse_tuple
def arg_declaration_code(self):
"""Return the declaration code as a string."""
arg_strings = [arg.declaration_code(inline=1)
for arg in self.arg_specs]
return "".join(arg_strings)
def arg_cleanup_code(self):
"""Return the cleanup code as a string."""
arg_strings = [arg.cleanup_code() for arg in self.arg_specs]
return "".join(arg_strings)
def arg_local_dict_code(self):
"""Return the code to create the local dict as a string."""
arg_strings = [arg.local_dict_code() for arg in self.arg_specs]
return "".join(arg_strings)
def function_code(self):
from .ext_tools import indent
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
# local_dict_code = indent(self.arg_local_dict_code(),4)
try_code = \
' try \n' \
' { \n' \
'#if defined(__GNUC__) || defined(__ICC)\n' \
' PyObject* raw_locals __attribute__ ((unused));\n' \
' PyObject* raw_globals __attribute__ ((unused));\n' \
'#else\n' \
' PyObject* raw_locals;\n' \
' PyObject* raw_globals;\n' \
'#endif\n' \
' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \
' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \
' /* argument conversion code */ \n' \
+ decl_code + \
' /* inline code */ \n' \
+ function_code + \
' /*I would like to fill in changed locals and globals here...*/ \n' \
' }\n'
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /* cleanup code */ \n" + \
cleanup_code + \
" if(!(PyObject*)return_val && !exception_occurred)\n" \
" {\n \n" \
" return_val = Py_None; \n" \
" }\n \n" \
" return return_val.disown(); \n" \
"} \n"
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
try_code + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args
return function_decls
class inline_ext_module(ext_tools.ext_module):
def __init__(self,name,compiler=''):
ext_tools.ext_module.__init__(self,name,compiler)
self._build_information.append(common_info.inline_info())
function_cache = {}
def inline(code,arg_names=[],local_dict=None, global_dict=None,
force=0,
compiler='',
verbose=0,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
newarr_converter=0,
**kw):
"""
Inline C/C++ code within Python scripts.
``inline()`` compiles and executes C/C++ code on the fly. Variables
in the local and global Python scope are also available in the
C/C++ code. Values are passed to the C/C++ code by assignment
much like variables passed are passed into a standard Python
function. Values are returned from the C/C++ code through a
special argument called return_val. Also, the contents of
mutable objects can be changed within the C/C++ code and the
changes remain after the C code exits and returns to Python.
inline has quite a few options as listed below. Also, the keyword
arguments for distutils extension modules are accepted to
specify extra information needed for compiling.
Parameters
----------
code : string
A string of valid C++ code. It should not specify a return
statement. Instead it should assign results that need to be
returned to Python in the `return_val`.
arg_names : [str], optional
A list of Python variable names that should be transferred from
Python into the C/C++ code. It defaults to an empty list.
local_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the local scope for the C/C++ code. If local_dict is not
specified the local dictionary of the calling function is used.
global_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the global scope for the C/C++ code. If `global_dict` is not
specified, the global dictionary of the calling function is used.
force : {0, 1}, optional
If 1, the C++ code is compiled every time inline is called. This
is really only useful for debugging, and probably only useful if
you're editing `support_code` a lot.
compiler : str, optional
The name of compiler to use when compiling. On windows, it
understands 'msvc' and 'gcc' as well as all the compiler names
understood by distutils. On Unix, it'll only understand the
values understood by distutils. (I should add 'gcc' though to
this).
On windows, the compiler defaults to the Microsoft C++ compiler.
If this isn't available, it looks for mingw32 (the gcc compiler).
On Unix, it'll probably use the same compiler that was used when
compiling Python. Cygwin's behavior should be similar.
verbose : {0,1,2}, optional
Specifies how much information is printed during the compile
phase of inlining code. 0 is silent (except on windows with msvc
where it still prints some garbage). 1 informs you when compiling
starts, finishes, and how long it took. 2 prints out the command
lines for the compilation process and can be useful if you're having
problems getting code to work. It's handy for finding the name of
the .cpp file if you need to examine it. verbose has no effect if
the compilation isn't necessary.
support_code : str, optional
A string of valid C++ code declaring extra code that might be
needed by your compiled function. This could be declarations of
functions, classes, or structures.
headers : [str], optional
A list of strings specifying header files to use when compiling
the code. The list might look like ``["<vector>","'my_header'"]``.
Note that the header strings need to be in a form than can be
pasted at the end of a ``#include`` statement in the C++ code.
customize : base_info.custom_info, optional
An alternative way to specify `support_code`, `headers`, etc. needed
by the function. See :mod:`scipy.weave.base_info` for more
details. (not sure this'll be used much).
type_converters : [type converters], optional
These guys are what convert Python data types to C/C++ data types.
If you'd like to use a different set of type conversions than the
default, specify them here. Look in the type conversions section
of the main documentation for examples.
auto_downcast : {1,0}, optional
This only affects functions that have numpy arrays as input
variables. Setting this to 1 will cause all floating point values
to be cast as float instead of double if all the Numeric arrays
are of type float. If even one of the arrays has type double or
double complex, all variables maintain their standard
types.
newarr_converter : int, optional
Unused.
Other Parameters
----------------
Relevant :mod:`distutils` keywords. These are duplicated from <NAME>'s
:class:`distutils.extension.Extension` class for convenience:
sources : [string]
List of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
.. note:: The `module_path` file is always appended to the front of
this list
include_dirs : [string]
List of directories to search for C/C++ header files (in Unix
form for portability).
define_macros : [(name : string, value : string|None)]
List of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line).
undef_macros : [string]
List of macros to undefine explicitly.
library_dirs : [string]
List of directories to search for C/C++ libraries at link time.
libraries : [string]
List of library names (not filenames or paths) to link against.
runtime_library_dirs : [string]
List of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded).
extra_objects : [string]
List of extra files to link with (e.g. object files not implied
by 'sources', static libraries that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
List of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
Any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
List of files that the extension depends on.
language : string
Extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
See Also
--------
distutils.extension.Extension : Describes additional parameters.
"""
# this grabs the local variables from the *previous* call
# frame -- that is the locals from the function that called
# inline.
global function_catalog
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if force:
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
else:
# 1. try local cache
try:
results = apply(function_cache[code],(local_dict,global_dict))
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try function catalog
try:
results = attempt_function_call(code,local_dict,global_dict)
# 3. build the function
except ValueError:
# compile the library
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
return results
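# Illustrative usage (added sketch, not part of this module): a typical call
# passes the C++ source and the names of Python variables to expose; the
# result comes back through `return_val`.
#
#     from scipy import weave   # Python 2 / weave-era example
#     a = 1
#     assert weave.inline('return_val = a + 1;', ['a']) == 2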
def attempt_function_call(code,local_dict,global_dict):
# we try 3 levels here -- a local cache first, then the
# catalog cache, and then persistent catalog.
#
global function_catalog
# 1. try local cache
try:
results = apply(function_cache[code],(local_dict,global_dict))
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try catalog cache.
function_list = function_catalog.get_functions_fast(code)
for func in function_list:
try:
results = apply(func,(local_dict,global_dict))
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except TypeError as msg: # should specify argument types here.
# This should really have its own error type, instead of
# checking the beginning of the message, but I don't know
# how to define that yet.
msg = str(msg)
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
# 3. try persistent catalog
module_dir = global_dict.get('__file__',None)
function_list = function_catalog.get_functions(code,module_dir)
for func in function_list:
try:
results = apply(func,(local_dict,global_dict))
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except: # should specify argument types here.
pass
# if we get here, the function wasn't found
raise ValueError('function with correct signature not found')
def inline_function_code(code,arg_names,local_dict=None,
global_dict=None,auto_downcast=1,
type_converters=None,compiler=''):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
from . import build_tools
compiler = build_tools.choose_compiler(compiler)
ext_func.set_compiler(compiler)
return ext_func.function_code()
def compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
# figure out where to store and what to name the extension module
# that will contain the function.
# storage_dir = catalog.intermediate_dir()
code = ndarray_api_version + '\n' + code
module_path = function_catalog.unique_module_name(code, module_dir)
storage_dir, module_name = os.path.split(module_path)
mod = inline_ext_module(module_name,compiler)
# create the function. This relies on the auto_downcast and
# type factories setting
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
mod.add_function(ext_func)
# if customize (a custom_info object), then set the module customization.
if customize:
mod.customize = customize
# add the extra "support code" needed by the function to the module.
if support_code:
mod.customize.add_support_code(support_code)
# add the extra headers needed by the function to the module.
for header in headers:
mod.customize.add_header(header)
# it's nice to let the users know when anything gets compiled, as the
# slowdown is very noticeable.
if verbose > 0:
print('<weave: compiling>')
# compile code in correct location, with the given compiler and verbosity
# setting. All input keywords are passed through to distutils
mod.compile(location=storage_dir,compiler=compiler,
verbose=verbose, **kw)
# import the module and return the function. Make sure
# the directory where it lives is in the python path.
try:
sys.path.insert(0,storage_dir)
exec('import ' + module_name)
func = eval(module_name+'.compiled_func')
finally:
del sys.path[0]
return func
| 2.296875 | 2 |
trove/guestagent/common/configuration.py | sapcc/trove | 1 | 4307 |
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import re
import six
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
class ConfigurationManager(object):
"""
ConfigurationManager is responsible for management of
datastore configuration.
Its base functionality includes reading and writing configuration files.
It is responsible for validating user inputs and requests.
When supplied an override strategy it allows the user to manage
configuration overrides as well.
"""
# Configuration group names. The names determine the order in which the
# groups get applied. System groups are divided into two camps; pre-user
# and post-user. In general system overrides will get applied over the
# user group, unless specified otherwise (i.e. SYSTEM_POST_USER_GROUP
# will be used).
SYSTEM_PRE_USER_GROUP = '10-system'
USER_GROUP = '20-user'
SYSTEM_POST_USER_GROUP = '50-system'
DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides'
DEFAULT_CHANGE_ID = 'common'
def __init__(self, base_config_path, owner, group, codec,
requires_root=False, override_strategy=None):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration files.
:type owner string
:param group Group of the configuration files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the manager requires superuser
privileges.
:type requires_root boolean
:param override_strategy Strategy used to manage configuration
overrides (e.g. ImportOverrideStrategy).
Defaults to OneFileOverrideStrategy
if None. This strategy should be
compatible with very much any datastore.
It is recommended each datastore defines
its strategy explicitly to avoid upgrade
compatibility issues in case the default
implementation changes in the future.
:type override_strategy ConfigurationOverrideStrategy
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._value_cache = None
if not override_strategy:
# Use OneFile strategy by default. Store the revisions in a
# sub-directory at the location of the configuration file.
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(base_config_path),
self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
self._override_strategy = OneFileOverrideStrategy(revision_dir)
else:
self._override_strategy = override_strategy
self._override_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def get_value(self, key, default=None):
"""Return the current value at a given key or 'default'.
"""
if self._value_cache is None:
self.refresh_cache()
return self._value_cache.get(key, default)
def parse_configuration(self):
"""Read contents of the configuration file (applying overrides if any)
and parse it into a dict.
:returns: Configuration file as a Python dict.
"""
base_options = operating_system.read_file(
self._base_config_path, codec=self._codec,
as_root=self._requires_root)
updates = self._override_strategy.parse_updates()
guestagent_utils.update_dict(updates, base_options)
return base_options
def save_configuration(self, options):
"""Write given contents to the base configuration file.
Remove all existing overrides (both system and user).
:param options Contents of the configuration file.
:type options string or dict
"""
if isinstance(options, dict):
# Serialize a dict of options for writing.
self.save_configuration(self._codec.serialize(options))
else:
self._override_strategy.remove(self.USER_GROUP)
self._override_strategy.remove(self.SYSTEM_PRE_USER_GROUP)
self._override_strategy.remove(self.SYSTEM_POST_USER_GROUP)
operating_system.write_file(
self._base_config_path, options, as_root=self._requires_root)
operating_system.chown(
self._base_config_path, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
self._base_config_path, FileMode.ADD_READ_ALL,
as_root=self._requires_root)
self.refresh_cache()
def has_system_override(self, change_id):
"""Return whether a given 'system' change exists.
"""
return (self._override_strategy.exists(self.SYSTEM_POST_USER_GROUP,
change_id) or
self._override_strategy.exists(self.SYSTEM_PRE_USER_GROUP,
change_id))
def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID,
pre_user=False):
"""Apply a 'system' change to the configuration.
System overrides are always applied after all user changes so that
they override any user-defined setting.
:param options Configuration changes.
:type options string or dict
"""
group_name = (
self.SYSTEM_PRE_USER_GROUP if pre_user else
self.SYSTEM_POST_USER_GROUP)
self._apply_override(group_name, change_id, options)
def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID):
"""Apply a 'user' change to the configuration.
The 'system' values will be re-applied over this override.
:param options Configuration changes.
:type options string or dict
"""
self._apply_override(self.USER_GROUP, change_id, options)
def get_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Get the user overrides"""
return self._override_strategy.get(self.USER_GROUP, change_id)
def _apply_override(self, group_name, change_id, options):
if not isinstance(options, dict):
# Deserialize the options into a dict if not already.
self._apply_override(
group_name, change_id, self._codec.deserialize(options))
else:
self._override_strategy.apply(group_name, change_id, options)
self.refresh_cache()
def remove_system_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'system' configuration change.
"""
self._remove_override(self.SYSTEM_POST_USER_GROUP, change_id)
self._remove_override(self.SYSTEM_PRE_USER_GROUP, change_id)
def remove_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'user' configuration change.
"""
self._remove_override(self.USER_GROUP, change_id)
def _remove_override(self, group_name, change_id):
self._override_strategy.remove(group_name, change_id)
self.refresh_cache()
def refresh_cache(self):
self._value_cache = self.parse_configuration()
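# Illustrative usage (added sketch, not part of the original module): a guest
# agent for an INI-style datastore might wire things up roughly as follows.
# The codec class name and the paths below are assumptions for the example.
#
#     from trove.guestagent.common.stream_codecs import IniCodec
#     manager = ConfigurationManager(
#         '/etc/mysql/my.cnf', 'mysql', 'mysql', IniCodec(),
#         requires_root=True,
#         override_strategy=ImportOverrideStrategy('/etc/mysql/overrides', 'cnf'))
#     manager.apply_system_override({'mysqld': {'local_infile': 0}})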
@six.add_metaclass(abc.ABCMeta)
class ConfigurationOverrideStrategy(object):
"""ConfigurationOverrideStrategy handles configuration files.
The strategy provides functionality to enumerate, apply and remove
configuration overrides.
"""
@abc.abstractmethod
def configure(self, *args, **kwargs):
"""Configure this strategy.
A strategy needs to be configured before it can be used.
It would typically be configured by the ConfigurationManager.
"""
@abc.abstractmethod
def exists(self, group_name, change_id):
"""Return whether a given revision exists.
"""
@abc.abstractmethod
def apply(self, group_name, change_id, options):
"""Apply given options on the most current configuration revision.
Update if a file with the same id already exists.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
:param options Configuration changes.
:type options dict
"""
@abc.abstractmethod
def remove(self, group_name, change_id=None):
"""Rollback a given configuration override.
Remove the whole group if 'change_id' is None.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
@abc.abstractmethod
def get(self, group_name, change_id=None):
"""Return the contents of a given configuration override
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
def parse_updates(self):
"""Return all updates applied to the base revision as a single dict.
Return an empty dict if the base file is always the most current
version of configuration.
:returns: Updates to the base revision as a Python dict.
"""
return {}
class ImportOverrideStrategy(ConfigurationOverrideStrategy):
"""Import strategy keeps overrides in separate files that get imported
into the base configuration file which never changes itself.
An override file is simply deleted when the override is removed.
We keep two sets of override files in a separate directory.
- User overrides - configuration overrides applied by the user via the
Trove API.
- System overrides - 'internal' configuration changes applied by the
guestagent.
The name format of override files is: '<set prefix>-<n>-<group name>.<ext>'
where 'set prefix' is used to order user/system sets,
'n' is an index used to keep track of the order in which overrides
within their set got applied.
"""
FILE_NAME_PATTERN = r'%s-([0-9]+)-%s\.%s$'
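# For example (illustrative): with the default change id and a '.cnf'
# revision extension, the first user override applied would be stored as
# '20-user-001-common.cnf'.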
def __init__(self, revision_dir, revision_ext):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
:param revision_ext Extension of revision files.
:type revision_ext string
"""
self._revision_dir = revision_dir
self._revision_ext = revision_ext
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
def exists(self, group_name, change_id):
return self._find_revision_file(group_name, change_id) is not None
def apply(self, group_name, change_id, options):
self._initialize_import_directory()
revision_file = self._find_revision_file(group_name, change_id)
if revision_file is None:
# Create a new file.
last_revision_index = self._get_last_file_index(group_name)
revision_file = guestagent_utils.build_file_path(
self._revision_dir,
'%s-%03d-%s' % (group_name, last_revision_index + 1,
change_id),
self._revision_ext)
else:
# Update the existing file.
current = operating_system.read_file(
revision_file, codec=self._codec, as_root=self._requires_root)
options = guestagent_utils.update_dict(options, current)
operating_system.write_file(
revision_file, options, codec=self._codec,
as_root=self._requires_root)
operating_system.chown(
revision_file, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def _initialize_import_directory(self):
"""Lazy-initialize the directory for imported revision files.
"""
if not os.path.exists(self._revision_dir):
operating_system.create_directory(
self._revision_dir, user=self._owner, group=self._group,
force=True, as_root=self._requires_root)
def remove(self, group_name, change_id=None):
removed = set()
if change_id:
# Remove a given file.
revision_file = self._find_revision_file(group_name, change_id)
if revision_file:
removed.add(revision_file)
else:
# Remove the entire group.
removed = self._collect_revision_files(group_name)
for path in removed:
operating_system.remove(path, force=True,
as_root=self._requires_root)
def get(self, group_name, change_id):
revision_file = self._find_revision_file(group_name, change_id)
return operating_system.read_file(revision_file,
codec=self._codec,
as_root=self._requires_root)
def parse_updates(self):
parsed_options = {}
for path in self._collect_revision_files():
options = operating_system.read_file(path, codec=self._codec,
as_root=self._requires_root)
guestagent_utils.update_dict(options, parsed_options)
return parsed_options
@property
def has_revisions(self):
"""Return True if there currently are any revision files.
"""
return (operating_system.exists(
self._revision_dir, is_directory=True,
as_root=self._requires_root) and
(len(self._collect_revision_files()) > 0))
def _get_last_file_index(self, group_name):
"""Get the index of the most current file in a given group.
"""
current_files = self._collect_revision_files(group_name)
if current_files:
name_pattern = self._build_rev_name_pattern(group_name=group_name)
last_file_name = os.path.basename(current_files[-1])
last_index_match = re.match(name_pattern, last_file_name)
if last_index_match:
return int(last_index_match.group(1))
return 0
def _collect_revision_files(self, group_name='.+'):
"""Collect and return a sorted list of paths to existing revision
files. The files should be sorted in the same order in which
they were applied.
"""
name_pattern = self._build_rev_name_pattern(group_name=group_name)
return sorted(operating_system.list_files_in_directory(
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root))
def _find_revision_file(self, group_name, change_id):
name_pattern = self._build_rev_name_pattern(group_name, change_id)
found = operating_system.list_files_in_directory(
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root)
return next(iter(found), None)
def _build_rev_name_pattern(self, group_name='.+', change_id='.+'):
return self.FILE_NAME_PATTERN % (group_name, change_id,
self._revision_ext)
class OneFileOverrideStrategy(ConfigurationOverrideStrategy):
"""This is a strategy for datastores that do not support multiple
configuration files.
It uses the Import Strategy to keep the overrides internally.
When an override is applied or removed a new configuration file is
generated by applying all changes on a saved-off base revision.
"""
BASE_REVISION_NAME = 'base'
REVISION_EXT = 'rev'
def __init__(self, revision_dir):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
"""
self._revision_dir = revision_dir
self._import_strategy = ImportOverrideStrategy(revision_dir,
self.REVISION_EXT)
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._base_revision_file = guestagent_utils.build_file_path(
self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT)
self._import_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def exists(self, group_name, change_id):
return self._import_strategy.exists(group_name, change_id)
def apply(self, group_name, change_id, options):
self._import_strategy.apply(group_name, change_id, options)
self._regenerate_base_configuration()
def remove(self, group_name, change_id=None):
if self._import_strategy.has_revisions:
self._import_strategy.remove(group_name, change_id=change_id)
self._regenerate_base_configuration()
if not self._import_strategy.has_revisions:
# The base revision file is no longer needed if there are no
# overrides. It will be regenerated based on the current
# configuration file on the first 'apply()'.
operating_system.remove(self._base_revision_file, force=True,
as_root=self._requires_root)
def get(self, group_name, change_id):
return self._import_strategy.get(group_name, change_id)
def _regenerate_base_configuration(self):
"""Gather all configuration changes and apply them in order on the base
revision. Write the results to the configuration file.
"""
if not os.path.exists(self._base_revision_file):
# Initialize the file with the current configuration contents if it
# does not exist.
operating_system.copy(
self._base_config_path, self._base_revision_file,
force=True, preserve=True, as_root=self._requires_root)
base_revision = operating_system.read_file(
self._base_revision_file, codec=self._codec,
as_root=self._requires_root)
changes = self._import_strategy.parse_updates()
updated_revision = guestagent_utils.update_dict(changes, base_revision)
operating_system.write_file(
self._base_config_path, updated_revision, codec=self._codec,
as_root=self._requires_root)
| 1.851563 | 2 |
API-Reference-Code-Generator.py | sawyercade/Documentation | 116 | 4308 | import pathlib
import yaml
documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml",
"Alpha Streams": "QuantConnect-Alpha-0.8.yaml"}
def RequestTable(api_call, params):
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'
for item in params:
example_ = "/"
description_ = "Optional. " if "required" not in item or not item["required"] else ""
description_ += item["description"]
if description_[-1] != ".":
description_ += "."
if "type" in item["schema"]:
type_ = item["schema"]["type"]
else:
type_ = item["schema"]["$ref"].split("/")[-1]
if "minimum" in item["schema"]:
description_ += f' Minimum: {item["schema"]["minimum"]}'
example_ = item["schema"]["minimum"]
elif "maximum" in item["schema"]:
description_ += f' Maximum: {item["schema"]["maximum"]}'
example_ = item["schema"]["maximum"]
elif "default" in item["schema"]:
description_ += f' Default: {item["schema"]["default"]}'
example_ = item["schema"]["default"]
if type_ == "array":
array_obj = item["schema"]["items"]
if "$ref" in array_obj:
type_ = array_obj["$ref"].split("/")[-1] + " Array"
ref = array_obj["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
request_object_ = doc
for path in ref:
request_object_ = request_object_[path]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
example_, __, __ = ExampleWriting(request_object_properties_, [], 1)
if "type" in array_obj:
type_ = array_obj["type"] + " Array"
if "enum" in array_obj:
type_ = type_ + " Enum"
description_ += f' Options: {str(array_obj["enum"])}'
example_ = f'"{array_obj["enum"][0]}"'
if "Enum" not in type_:
if "string" in type_:
example_ = '"string"'
elif "number" in type_ or "integer" in type_:
example_ = '0'
elif "boolean" in type_:
example_ = 'true'
writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'
example += f' "{item["name"]}": {example_},\n'
return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>"
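# Illustrative note (added sketch): RequestTable("authentication", params)
# returns the HTML <table> markup for the call's parameters together with a
# JSON request example; `params` is the 'parameters' list taken from the
# resolved YAML spec (the exact lookup path is an assumption here).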
def ResponseTable(requestBody):
writeUp = ""
array = False
order = 0
if "content" in requestBody:
component = requestBody["content"]["application/json"]["schema"]
if "$ref" in component:
component = component["$ref"].split("/")[1:]
elif "items" in component and "$ref" in component["items"]:
component = component["items"]["$ref"].split("/")[1:]
array = True
order += 1
else:
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n'
writeUp += '</tr>\n</thead>\n'
writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += f'[\n "{component["items"]["example"]}"\n]'
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
return writeUp
else:
component = requestBody["$ref"].split("/")[1:]
item_list = [component]
i = 0
while i < len(item_list):
request_object = doc
for item in item_list[i]:
request_object = request_object[item]
if "items" in request_object and "oneOf" in request_object["items"]:
prop = request_object["items"]["oneOf"]
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'
writeUp += '</tr>\n</thead>'
for y in prop:
path = y["$ref"].split("/")[1:]
name = path[-1]
enum = ""
item_list.append(path)
request_object = doc
for item in path:
request_object = request_object[item]
if "enum" in request_object:
enum = " Options: " + str(request_object["enum"])
description_ = request_object["description"]
if description_[-1] != ".":
description_ += "."
writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n'
if "example" in request_object:
text = request_object["example"]
elif "enum" in request_object:
text = '"' + request_object["enum"][0] + '"'
example += f'\n {text},'
example += '\b\n ]\n]'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
continue
elif "oneOf" in request_object:
for y in request_object["oneOf"]:
item_list.append(y["$ref"].split("/")[1:])
i += 1
continue
elif "properties" in request_object:
request_object_properties = request_object["properties"]
elif "content" in request_object:
item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])
i += 1
continue
elif "type" in request_object and "properties" not in request_object:
request_object_properties = {item: request_object}
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
if "description" in request_object:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'
else:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'
writeUp += '</tr>\n</thead>\n'
example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order)
if array:
array = False
order -= 1
for line in html_property:
writeUp += line
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
return writeUp
def ExampleWriting(request_object_properties, item_list, array=False, order=0):
tab = " " * order
if array:
example = "[\n {\n"
else:
example = "{\n"
line = []
for name, properties in request_object_properties.items():
type_ = properties["type"] if "type" in properties else "object"
description_ = properties["description"] if "description" in properties else "/"
if (example != "{\n" and not array) or (example != "[\n {\n" and array):
example += ",\n"
example_ = tab + f' "{name}": '
if type_ == "array":
example_ += '[\n'
if "type" in properties["items"]:
type_ = properties["items"]["type"] + " Array"
example_ += tab + f' "{properties["items"]["type"]}"'
elif "$ref" in properties["items"]:
ref = properties["items"]["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2)
example_ += tab + " " * 2 + write_up
elif type_ == "object":
if "additionalProperties" in properties:
add_prop = properties["additionalProperties"]
if "type" in add_prop:
prop_type = add_prop["type"]
if "format" in prop_type:
type_ = prop_type + f'$({prop_type["format"]})' + " object"
if prop_type["format"] == "date-time":
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += "0"
else:
type_ = prop_type + " object"
example_ += f'"{prop_type}"'
elif "$ref" in add_prop:
ref = add_prop["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "$ref" in properties:
ref = properties["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "type" in request_object_:
properties = request_object_properties_ = request_object_
type_ = request_object_["type"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
elif type_ == "integer" or type_ == "number":
example_ += "0"
elif type_ == "boolean":
example_ += "true"
elif type_ == "string":
if "format" in properties:
type_ += f'(${properties["format"]})'
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += '"string"'
if description_[-1] != ".":
description_ += "."
if "enum" in properties:
type_ += " Enum"
            description_ += f' Options: {properties["enum"]}'
if "string" in type_:
example_ = tab + f' "{name}": "{properties["enum"][0]}"'
else:
example_ = tab + f' "{name}": {properties["enum"][0]}'
if "example" in properties:
eg = properties["example"]
type_ += f'<br/><i><sub>example: {eg}</sub></i>'
if isinstance(eg, str):
eg = '"' + eg + '"'
example_ = tab + f' "{name}": {eg}'
if "Array" in type_:
example_ += "\n" + tab + " ]"
if order == 0 or array:
line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')
example += example_
if not array:
return example + "\n" + tab + "}", line, item_list
return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list
for section, source in documentations.items():
yaml_file = open(source)
doc = yaml.load(yaml_file, Loader=yaml.Loader)
paths = doc["paths"]
for api_call, result in paths.items():
j = 1
content = result["post"] if "post" in result else result["get"]
# Create path if not exist
destination_folder = pathlib.Path("/".join(content["tags"]))
destination_folder.mkdir(parents=True, exist_ok=True)
# Create Introduction part
with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file:
html_file.write("<p>\n")
html_file.write(f"{content['summary']}\n")
html_file.write("</p>\n")
j += 1
# Create Description part if having one
if "description" in content:
with open(destination_folder / f'{j:02} Description.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'{content["description"]}\n')
html_file.write('</p>\n')
j += 1
# Create Request part
with open(destination_folder / f'{j:02} Request.html', "w") as html_file:
description_ = ""
if "parameters" in content:
writeUp = RequestTable(api_call, content["parameters"])
elif "requestBody" in content:
if "description" in content["requestBody"]:
description_ = str(content["requestBody"]["description"])
if description_[-1] != ".":
description_ += "."
description_ += " "
writeUp = ResponseTable(content["requestBody"])
else:
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'
                writeUp += f'<tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'
description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n'
html_file.write("<p>\n" + description_ + "</p>\n")
html_file.write(writeUp)
j += 1
# Create Response part
with open(destination_folder / f'{j:02} Responses.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')
html_file.write('</p>\n')
request_body = content["responses"]
for code, properties in request_body.items():
if code == "200":
html_file.write('<h4>200 Success</h4>\n')
elif code == "401":
html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')
html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')
html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')
continue
elif code == "404":
html_file.write('<h4>404 Not Found Error</h4>\n')
html_file.write('<p>The requested item, index, page was not found.</p>\n')
continue
elif code == "default":
html_file.write('<h4>Default Generic Error</h4>\n')
writeUp = ResponseTable(properties)
html_file.write(writeUp)
print(f"Documentation of {section} is generated and inplace!") | 2.296875 | 2 |
forge_api_client/hubs.py | dmh126/forge-python-data-management-api | 1 | 4309 | from .utils import get_request, authorized
class Hubs:
@authorized
    def getHubs(self):
        """Return the hubs available to the authenticated Forge account."""
        url = self.api_url + '/project/v1/hubs'
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
@authorized
    def getHub(self, hub_id):
        """Return details of a single hub identified by hub_id."""
        url = self.api_url + '/project/v1/hubs/%s' % hub_id
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
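# Hedged usage sketch (not part of the original module): it assumes a client class
# that mixes in Hubs and exposes api_url, token_type and access_token, and that
# get_request returns the decoded JSON body of the Forge response.
def _example_list_hub_ids(forge_client):
    """Illustrative only: collect the ids of every hub visible to the client."""
    hubs = forge_client.getHubs()
    return [hub["id"] for hub in hubs.get("data", [])]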
| 2.578125 | 3 |
tlp/django_app/app/urls.py | munisisazade/create-django-app | 14 | 4310 | from django.conf.urls import url
# from .views import BaseIndexView
urlpatterns = [
# url(r'^$', BaseIndexView.as_view(), name="index"),
] | 1.445313 | 1 |
tools/archive/create_loadable_configs.py | madelinemccombe/iron-skillet | 0 | 4311 | <gh_stars>0
# Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <<EMAIL>>
'''
Palo Alto Networks create_loadable_configs.py
Provides rendering of configuration templates with user defined values
Output is a set of loadable full configurations and set commands for Panos and Panorama
Edit the config_variables.yaml values and then run the script
This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''
import datetime
import os
import shutil
import sys
import time
import getpass
import oyaml
from jinja2 import Environment, FileSystemLoader
from passlib.hash import des_crypt
from passlib.hash import md5_crypt
from passlib.hash import sha256_crypt
from passlib.hash import sha512_crypt
defined_filters = ['md5_hash', 'des_hash', 'sha512_hash']
def myconfig_newdir(myconfigdir_name, foldertime):
'''
create a new main loadable_configs folder if required then new subdirectories for configs
:param myconfigdir_name: prefix folder name from the my_variables.py file
:param foldertime: datetime when script run; to be used as suffix of folder name
:return: the myconfigdir full path name
'''
# get the full path to the config directory we want (panos / panorama)
myconfigpath = os.path.abspath(os.path.join('..', 'loadable_configs'))
if os.path.isdir(myconfigpath) is False:
os.mkdir(myconfigpath, mode=0o755)
print('created new loadable config directory')
# check that configs folder exists and if not create a new one
# then create snippets and full sub-directories
myconfigdir = '{0}/{1}-{2}'.format(myconfigpath, myconfigdir_name, foldertime)
if os.path.isdir(myconfigdir) is False:
os.mkdir(myconfigdir, mode=0o755)
print('\ncreated new archive folder {0}-{1}'.format(myconfigdir_name, foldertime))
if os.path.isdir('{0}/{1}'.format(myconfigdir, config_type)) is False:
os.mkdir('{0}/{1}'.format(myconfigdir, config_type))
print('created new subdirectories for {0}'.format(config_type))
return myconfigdir
def create_context(config_var_file):
# read the metafile to get variables and values
try:
with open(config_var_file, 'r') as var_metadata:
variables = oyaml.safe_load(var_metadata.read())
except IOError as ioe:
print(f'Could not open metadata file {config_var_file}')
print(ioe)
sys.exit()
# grab the metadata values and convert to key-based dictionary
jinja_context = dict()
for snippet_var in variables['variables']:
jinja_context[snippet_var['name']] = snippet_var['value']
return jinja_context
def template_render(filename, template_path, render_type, context):
'''
render the jinja template using the context value from config_variables.yaml
:param filename: name of the template file
:param template_path: path for the template file
:param render_type: type if full or set commands; aligns with folder name
:param context: dict of variables to render
:return: return the rendered xml file and set conf file
'''
print('..creating template for {0}'.format(filename))
env = Environment(loader=FileSystemLoader('{0}/{1}'.format(template_path, render_type)))
# load our custom jinja filters here, see the function defs below for reference
env.filters['md5_hash'] = md5_hash
env.filters['des_hash'] = des_hash
env.filters['sha512_hash'] = sha512_hash
template = env.get_template(filename)
rendered_template = template.render(context)
return rendered_template
def template_save(snippet_name, myconfigdir, config_type, element):
'''
after rendering the template save to the myconfig directory
each run saves with a unique prefix name + datetime
:param snippet_name: name of the output file
:param myconfigdir: path to the my_config directory
:param config_type: based on initial run list; eg. panos or panorama
    :param element: rendered configuration content to write into the output file
:return: no value returned (future could be success code)
'''
print('..saving template for {0}'.format(snippet_name))
filename = snippet_name
with open('{0}/{1}/{2}'.format(myconfigdir, config_type, filename), 'w') as configfile:
configfile.write(element)
# copy the variables file used for the render into the my_template folder
var_file = 'loadable_config_vars/config_variables.yaml'
if os.path.isfile('{0}/{1}'.format(myconfigdir, var_file)) is False:
vfilesrc = var_file
vfiledst = '{0}/{1}'.format(myconfigdir, var_file)
shutil.copy(vfilesrc, vfiledst)
return
# define functions for custom jinja filters
def md5_hash(txt):
'''
Returns the MD5 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the phash field
in the configurations
'''
return md5_crypt.hash(txt)
def des_hash(txt):
'''
Returns the DES Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the phash field
in the configurations
'''
return des_crypt.hash(txt)
def sha256_hash(txt):
'''
Returns the SHA256 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the
phash field in the configurations
'''
return sha256_crypt.hash(txt)
def sha512_hash(txt):
'''
Returns the SHA512 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the
phash field in the configurations
'''
return sha512_crypt.hash(txt)
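# Hedged usage note (illustrative, not taken from the template files): once registered
# in template_render, the filters above are referenced from the Jinja2 templates
# roughly as
#     phash: {{ ADMINISTRATOR_PASSWORD | md5_hash }}
# so only a salted hash, never the cleartext password, ends up in the rendered config.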
def replace_variables(config_type, render_type, input_var):
'''
get the input variables and render the output configs with jinja2
inputs are read from the template directory and output to my_config
:param config_type: panos or panorama to read/write to the respective directories
    :param render_type: full or set_commands; selects which template flavour is rendered
    :param input_var: dict of user-supplied values (output dir, archive time, admin credentials)
'''
config_variables = 'config_variables.yaml'
# create dict of values for the jinja template render
context = create_context(config_variables)
# update context dict with variables from user input
for snippet_var in input_var:
context[snippet_var] = input_var[snippet_var]
# get the full path to the output directory we want (panos / panorama)
template_path = os.path.abspath(os.path.join('..',
'templates', config_type))
# append to the sys path for module lookup
sys.path.append(template_path)
# output subdir located in loadable_configs dir
myconfig_path = myconfig_newdir(input_var['output_dir'], input_var['archive_time'])
# render full and set conf files
print('\nworking with {0} config template'.format(render_type))
if render_type == 'full':
filename = 'iron_skillet_{0}_full.xml'.format(config_type)
if render_type == 'set_commands':
filename = 'iron_skillet_{0}_full.conf'.format(config_type)
element = template_render(filename, template_path, render_type, context)
template_save(filename, myconfig_path, config_type, element)
print('\nconfigs have been created and can be found in {0}'.format(myconfig_path))
print('along with the metadata values used to render the configs\n')
return
if __name__ == '__main__':
# Use the timestamp to create a unique folder name
print('=' * 80)
print(' ')
print('Welcome to Iron-Skillet'.center(80))
print(' ')
print('=' * 80)
input_var = {}
# archive_time used as part of the my_config directory name
input_var['archive_time'] = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
print('\ndatetime used for folder creation: {0}\n'.format(input_var['archive_time']))
# this prompts for the prefix name of the output directory
input_var['output_dir'] = input('Enter the name of the output directory: ')
# this prompts for the superuser username to be added into the configuration; no default admin/admin used
input_var['ADMINISTRATOR_USERNAME'] = input('Enter the superuser administrator account username: ')
print('\na phash will be created for superuser {0} and added to the config file\n'.format(
input_var['ADMINISTRATOR_USERNAME']))
passwordmatch = False
# prompt for the superuser password to create a phash and store in the my_config files; no default admin/admin
while passwordmatch is False:
password1 = getpass.getpass("Enter the superuser administrator account password: ")
password2 = getpass.getpass("Enter password again to verify: ")
if password1 == password2:
            input_var['ADMINISTRATOR_PASSWORD'] = password1
            passwordmatch = True
else:
print('\nPasswords do not match. Please try again.\n')
# loop through all config types that have their respective template folders
for config_type in ['panos', 'panorama']:
for render_type in ['full', 'set_commands']:
replace_variables(config_type, render_type, input_var) | 2.328125 | 2 |
pactman/verifier/pytest_plugin.py | piotrantosz/pactman | 67 | 4312 | import glob
import logging
import os
import warnings
import pytest
from _pytest.outcomes import Failed
from _pytest.reports import TestReport
from .broker_pact import BrokerPact, BrokerPacts, PactBrokerConfig
from .result import PytestResult, log
def pytest_addoption(parser):
group = parser.getgroup("pact specific options (pactman)")
group.addoption(
"--pact-files", default=None, help="pact JSON files to verify (wildcards allowed)"
)
group.addoption("--pact-broker-url", default="", help="pact broker URL")
group.addoption("--pact-broker-token", default="", help="pact broker bearer token")
group.addoption(
"--pact-provider-name", default=None, help="pact name of provider being verified"
)
group.addoption(
"--pact-consumer-name",
default=None,
help="consumer name to limit pact verification to - "
"DEPRECATED, use --pact-verify-consumer instead",
)
group.addoption(
"--pact-verify-consumer", default=None, help="consumer name to limit pact verification to"
)
group.addoption(
"--pact-verify-consumer-tag",
metavar="TAG",
action="append",
help="limit broker pacts verified to those matching the tag. May be "
"specified multiple times in which case pacts matching any of these "
"tags will be verified.",
)
group.addoption(
"--pact-publish-results",
action="store_true",
default=False,
help="report pact verification results to pact broker",
)
group.addoption(
"--pact-provider-version",
default=None,
help="provider version to use when reporting pact results to pact broker",
)
group.addoption(
"--pact-allow-fail",
default=False,
action="store_true",
help="do not fail the pytest run if any pacts fail verification",
)
# Future options to be implemented. Listing them here so naming consistency can be a thing.
# group.addoption("--pact-publish-pacts", action="store_true", default=False,
# help="publish pacts to pact broker")
# group.addoption("--pact-consumer-version", default=None,
# help="consumer version to use when publishing pacts to the broker")
# group.addoption("--pact-consumer-version-source", default=None,
# help="generate consumer version from source 'git-tag' or 'git-hash'")
# group.addoption("--pact-consumer-version-tag", metavar='TAG', action="append",
# help="tag(s) that should be applied to the consumer version when pacts "
# "are uploaded to the broker; multiple tags may be supplied")
def get_broker_url(config):
return config.getoption("pact_broker_url") or os.environ.get("PACT_BROKER_URL")
def get_provider_name(config):
return config.getoption("pact_provider_name") or os.environ.get("PACT_PROVIDER_NAME")
# add the pact broker URL to the pytest output if running verbose
def pytest_report_header(config):
if config.getoption("verbose") > 0:
location = get_broker_url(config) or config.getoption("pact_files")
return [f"Loading pacts from {location}"]
def pytest_configure(config):
logging.getLogger("pactman").handlers = []
logging.basicConfig(format="%(message)s")
verbosity = config.getoption("verbose")
if verbosity > 0:
log.setLevel(logging.DEBUG)
class PytestPactVerifier:
def __init__(self, publish_results, provider_version, interaction, consumer):
self.publish_results = publish_results
self.provider_version = provider_version
self.interaction = interaction
self.consumer = consumer
def verify(self, provider_url, provider_setup, extra_provider_headers={}):
try:
self.interaction.verify_with_callable_setup(provider_url, provider_setup, extra_provider_headers)
except (Failed, AssertionError) as e:
raise Failed(str(e)) from None
def finish(self):
if self.consumer and self.publish_results and self.provider_version:
self.consumer.publish_result(self.provider_version)
def flatten_pacts(pacts):
for consumer in pacts:
last = consumer.interactions[-1]
for interaction in consumer.interactions:
if interaction is last:
yield (interaction, consumer)
else:
yield (interaction, None)
def load_pact_files(file_location):
for filename in glob.glob(file_location, recursive=True):
yield BrokerPact.load_file(filename, result_factory=PytestResult)
def test_id(identifier):
interaction, _ = identifier
return str(interaction)
def pytest_generate_tests(metafunc):
if "pact_verifier" in metafunc.fixturenames:
broker_url = get_broker_url(metafunc.config)
if not broker_url:
pact_files_location = metafunc.config.getoption("pact_files")
if not pact_files_location:
raise ValueError("need a --pact-broker-url or --pact-files option")
pact_files = load_pact_files(pact_files_location)
metafunc.parametrize(
"pact_verifier", flatten_pacts(pact_files), ids=test_id, indirect=True
)
else:
provider_name = get_provider_name(metafunc.config)
if not provider_name:
raise ValueError("--pact-broker-url requires the --pact-provider-name option")
broker = PactBrokerConfig(
broker_url,
metafunc.config.getoption("pact_broker_token"),
metafunc.config.getoption("pact_verify_consumer_tag", []),
)
broker_pacts = BrokerPacts(
provider_name, pact_broker=broker, result_factory=PytestResult
)
pacts = broker_pacts.consumers()
filter_consumer_name = metafunc.config.getoption("pact_verify_consumer")
if not filter_consumer_name:
filter_consumer_name = metafunc.config.getoption("pact_consumer_name")
if filter_consumer_name:
warnings.warn(
"The --pact-consumer-name command-line option is deprecated "
"and will be removed in the 3.0.0 release.",
DeprecationWarning,
)
if filter_consumer_name:
pacts = [pact for pact in pacts if pact.consumer == filter_consumer_name]
metafunc.parametrize("pact_verifier", flatten_pacts(pacts), ids=test_id, indirect=True)
class PactTestReport(TestReport):
"""Custom TestReport that allows us to attach an interaction to the result, and
then display the interaction's verification result ouput as well as the traceback
of the failure.
"""
@classmethod
def from_item_and_call(cls, item, call, interaction):
report = super().from_item_and_call(item, call)
report.pact_interaction = interaction
# the toterminal() call can't reasonably get at this config, so we store it here
report.verbosity = item.config.option.verbose
return report
def toterminal(self, out):
out.line("Pact failure details:", bold=True)
for text, kw in self.pact_interaction.result.results_for_terminal():
out.line(text, **kw)
if self.verbosity > 0:
out.line("Traceback:", bold=True)
return super().toterminal(out)
else:
out.line("Traceback not shown, use pytest -v to show it")
def pytest_runtest_makereport(item, call):
if call.when != "call" or "pact_verifier" not in getattr(item, "fixturenames", []):
return
# use our custom TestReport subclass if we're reporting on a pact verification call
interaction = item.funcargs["pact_verifier"].interaction
report = PactTestReport.from_item_and_call(item, call, interaction)
if report.failed and item.config.getoption("pact_allow_fail"):
# convert the fail into an "expected" fail, which allows the run to pass
report.wasxfail = True
report.outcome = "passed"
return report
def pytest_report_teststatus(report, config):
if not hasattr(report, "pact_interaction"):
return
if hasattr(report, "wasxfail"):
# wasxfail usually displays an "X" but since it's not *expected* to fail an "f" is a little clearer
return "ignore fail", "f", "IGNORE_FAIL"
@pytest.fixture()
def pact_verifier(pytestconfig, request):
interaction, consumer = request.param
p = PytestPactVerifier(
pytestconfig.getoption("pact_publish_results"),
pytestconfig.getoption("pact_provider_version"),
interaction,
consumer,
)
yield p
p.finish()
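# Hedged usage sketch (not part of this plugin): a provider's own test suite would
# normally contain something like the function below. The URL and the
# `provider_states` callable are assumptions about that suite, not names defined here.
def _example_provider_test(pact_verifier):
    def provider_states(interaction):
        # Arrange provider data matching interaction.provider_state here.
        pass

    pact_verifier.verify("http://localhost:5000", provider_states)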
| 2.140625 | 2 |
interface/docstring.py | karttur/geoimagine02-grass | 0 | 4313 | # -*- coding: utf-8 -*-
def docstring_property(class_doc):
"""Property attribute for docstrings.
Took from: https://gist.github.com/bfroehle/4041015
>>> class A(object):
... '''Main docstring'''
... def __init__(self, x):
... self.x = x
... @docstring_property(__doc__)
... def __doc__(self):
... return "My value of x is %s." % self.x
>>> A.__doc__
'Main docstring'
>>> a = A(10)
>>> a.__doc__
'My value of x is 10.'
"""
def wrapper(fget):
return DocstringProperty(class_doc, fget)
return wrapper
class DocstringProperty(object):
"""Property for the `__doc__` attribute.
Different than `property` in the following two ways:
* When the attribute is accessed from the main class, it returns the value
of `class_doc`, *not* the property itself. This is necessary so Sphinx
and other documentation tools can access the class docstring.
* Only supports getting the attribute; setting and deleting raise an
`AttributeError`.
"""
def __init__(self, class_doc, fget):
self.class_doc = class_doc
self.fget = fget
def __get__(self, obj, type=None):
if obj is None:
return self.class_doc
else:
return self.fget(obj)
def __set__(self, obj, value):
raise AttributeError("can't set attribute")
def __delete__(self, obj):
raise AttributeError("can't delete attribute")
| 2.765625 | 3 |
autocnet/matcher/cuda_matcher.py | gsn9/autocnet | 0 | 4314 | <reponame>gsn9/autocnet
import warnings
try:
import cudasift as cs
except:
cs = None
import numpy as np
import pandas as pd
def match(edge, aidx=None, bidx=None, **kwargs):
"""
Apply a composite CUDA matcher and ratio check. If this method is used,
no additional ratio check is necessary and no symmetry check is required.
The ratio check is embedded on the cuda side and returned as an
ambiguity value. In testing symmetry is not required as it is expensive
without significant gain in accuracy when using this implementation.
"""
source_kps = edge.source.get_keypoints(index=aidx)
source_des = edge.source.descriptors[aidx]
source_map = {k:v for k, v in enumerate(source_kps.index)}
destin_kps = edge.destination.get_keypoints(index=bidx)
destin_des = edge.destination.descriptors[bidx]
destin_map = {k:v for k, v in enumerate(destin_kps.index)}
s_siftdata = cs.PySiftData.from_data_frame(source_kps, source_des)
d_siftdata = cs.PySiftData.from_data_frame(destin_kps, destin_des)
cs.PyMatchSiftData(s_siftdata, d_siftdata)
matches, _ = s_siftdata.to_data_frame()
# Matches are reindexed 0-n, but need to be remapped to the source_kps,
    # destin_kps indices; source_map and destin_map provide that remapping.
source = np.empty(len(matches))
source[:] = edge.source['node_id']
destination = np.empty(len(matches))
destination[:] = edge.destination['node_id']
df = pd.concat([pd.Series(source), pd.Series(matches.index),
pd.Series(destination), matches.match,
matches.score, matches.ambiguity], axis=1)
df.columns = ['source_image', 'source_idx', 'destination_image',
'destination_idx', 'score', 'ambiguity']
df.source_idx = df.source_idx.map(source_map)
df.destination_idx = df.destination_idx.map(destin_map)
# Set the matches and set the 'ratio' (ambiguity) mask
edge.matches = df
| 1.96875 | 2 |
app/apis/__init__.py | FabienArcellier/blueprint-webapp-flask-restx | 0 | 4315 | from flask_restx import Api
from app.apis.hello import api as hello
api = Api(
title='api',
version='1.0',
description='',
prefix='/api',
doc='/api'
)
api.add_namespace(hello)
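# Hedged usage sketch: the aggregated Api instance is typically bound to the Flask
# application elsewhere (e.g. in an app factory); `_example_create_app` is an
# assumption for illustration, not code from this package.
def _example_create_app():
    from flask import Flask

    app = Flask(__name__)
    api.init_app(app)
    return app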
| 2.046875 | 2 |
tests/test_core.py | Kantouzin/brainfuck | 0 | 4316 | <gh_stars>0
# coding: utf-8
import unittest
from test.support import captured_stdout
from brainfuck import BrainFuck
class TestCore(unittest.TestCase):
def test_hello_world(self):
bf = BrainFuck()
with captured_stdout() as stdout:
bf.run()
self.assertEqual(stdout.getvalue(), "Hello, world!\n")
def test_fizzbuzz(self):
bf = BrainFuck()
bf.load_file("./tests/fizz_buzz.txt")
with captured_stdout() as stdout:
bf.run()
fizzbuzz_list = list()
for i in range(1, 101):
if i % 15 == 0:
fizzbuzz_list.append("FizzBuzz")
elif i % 3 == 0:
fizzbuzz_list.append("Fizz")
elif i % 5 == 0:
fizzbuzz_list.append("Buzz")
else:
fizzbuzz_list.append(str(i))
fizzbuzz_list.append("\n")
self.assertEqual(stdout.getvalue(), " ".join(fizzbuzz_list))
def test_set_command(self):
bf = BrainFuck()
bf.set_command("にゃにゃ", "にゃー", "にゃっ", "にゃん",
"にゃ。", "にゃ、", "「", "」")
bf.load_file("./tests/hello_world_nya.txt")
with captured_stdout() as stdout:
bf.run()
self.assertEqual(stdout.getvalue(), "Hello, world!\n")
if __name__ == "__main__":
unittest.main()
| 2.890625 | 3 |
main.py | poltavski/social-network-frontend | 0 | 4317 | from fastapi import FastAPI, Request, Response
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from utils import get_page_data, process_initial
import uvicorn
app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
# Expect requests with cookies
return process_initial(request)
@app.get("/page", response_class=HTMLResponse)
async def page(request: Request):
# Expect requests with cookies
return get_page_data(request)
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8050, log_level="info")
| 2.640625 | 3 |
Core/Python/create_static_group.py | Ku-Al/OpenManage-Enterprise | 0 | 4318 | <gh_stars>0
#
# Python script using OME API to create a new static group
#
# _author_ = <NAME> <<EMAIL>>
# _version_ = 0.1
#
# Copyright (c) 2020 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SYNOPSIS:
Script to create a new static group
DESCRIPTION:
This script exercises the OME REST API to create a new static
group. The user is responsible for adding devices to the
group once the group has been successfully created.
For authentication X-Auth is used over Basic Authentication
Note that the credentials entered are not stored to disk.
EXAMPLE:
python create_static_group.py --ip <xx> --user <username>
--password <<PASSWORD>> --groupname "Random Test Group"
"""
import json
import argparse
from argparse import RawTextHelpFormatter
import urllib3
import requests
def create_static_group(ip_address, user_name, password, group_name):
""" Authenticate with OME and enumerate groups """
try:
session_url = 'https://%s/api/SessionService/Sessions' % ip_address
group_url = "https://%s/api/GroupService/Groups?$filter=Name eq 'Static Groups'" % ip_address
headers = {'content-type': 'application/json'}
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
if session_info.status_code == 201:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
response = requests.get(group_url, headers=headers, verify=False)
if response.status_code == 200:
json_data = response.json()
if json_data['@odata.count'] > 0:
# Technically there should be only one result in the filter
group_id = json_data['value'][0]['Id']
group_payload = {"GroupModel": {
"Name": group_name,
"Description": "",
"MembershipTypeId": 12,
"ParentId": int(group_id)}
}
create_url = 'https://%s/api/GroupService/Actions/GroupService.CreateGroup' % ip_address
create_resp = requests.post(create_url, headers=headers,
verify=False,
data=json.dumps(group_payload))
if create_resp.status_code == 200:
print("New group created : ID =", create_resp.text)
elif create_resp.status_code == 400:
print("Failed group creation ...See error info below")
print(json.dumps(create_resp.json(), indent=4,
sort_keys=False))
else:
print("Unable to retrieve group list from %s" % ip_address)
else:
print("Unable to create a session with appliance %s" % ip_address)
except Exception as error:
print("Unexpected error:", str(error))
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False,
help="Username for OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=True,
help="Password for OME Appliance")
parser.add_argument("--groupname", "-g", required=True,
help="A valid name for the group")
args = parser.parse_args()
create_static_group(args.ip, args.user, args.password, args.groupname)
| 2.703125 | 3 |
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py | kagrze/ignite | 0 | 4319 | <filename>examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py
from typing import Callable, Optional, Tuple, Union
import numpy as np
from torch.utils.data import DataLoader, Sampler
from torch.utils.data.dataset import Subset, ConcatDataset
import torch.utils.data.distributed as data_dist
from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset
def get_train_val_loaders(root_path: str,
train_transforms: Callable,
val_transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
val_batch_size: Optional[int] = None,
pin_memory: bool = True,
random_seed: Optional[int] = None,
train_sampler: Optional[Union[Sampler, str]] = None,
val_sampler: Optional[Union[Sampler, str]] = None,
with_sbd: Optional[str] = None,
limit_train_num_samples: Optional[int] = None,
limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]:
train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)
if with_sbd is not None:
sbd_train_ds = get_train_noval_sbdataset(with_sbd)
train_ds = ConcatDataset([train_ds, sbd_train_ds])
if random_seed is not None:
np.random.seed(random_seed)
if limit_train_num_samples is not None:
train_indices = np.random.permutation(len(train_ds))[:limit_train_num_samples]
train_ds = Subset(train_ds, train_indices)
if limit_val_num_samples is not None:
val_indices = np.random.permutation(len(val_ds))[:limit_val_num_samples]
val_ds = Subset(val_ds, val_indices)
# random samples for evaluation on training dataset
if len(val_ds) < len(train_ds):
train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
train_ds = TransformedDataset(train_ds, transform_fn=train_transforms)
val_ds = TransformedDataset(val_ds, transform_fn=val_transforms)
train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms)
if isinstance(train_sampler, str):
assert train_sampler == 'distributed'
train_sampler = data_dist.DistributedSampler(train_ds)
if isinstance(val_sampler, str):
assert val_sampler == 'distributed'
val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False)
train_loader = DataLoader(train_ds, shuffle=train_sampler is None,
batch_size=batch_size, num_workers=num_workers,
sampler=train_sampler,
pin_memory=pin_memory, drop_last=True)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return train_loader, val_loader, train_eval_loader
def get_inference_dataloader(root_path: str,
mode: str,
transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
pin_memory: bool = True,
limit_num_samples: Optional[int] = None) -> DataLoader:
assert mode in ('train', 'test'), "Mode should be 'train' or 'test'"
get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset
dataset = get_dataset_fn(root_path, return_meta=True)
if limit_num_samples is not None:
indices = np.random.permutation(len(dataset))[:limit_num_samples]
dataset = Subset(dataset, indices)
dataset = TransformedDataset(dataset, transform_fn=transforms)
loader = DataLoader(dataset, shuffle=False,
batch_size=batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return loader
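# Hedged usage sketch: the dataset root and transform callables below are
# placeholders; the real pipeline passes augmentation callables from the
# experiment configuration.
def _example_build_loaders(root_path, train_transforms, val_transforms):
    return get_train_val_loaders(
        root_path,
        train_transforms=train_transforms,
        val_transforms=val_transforms,
        batch_size=8,
        num_workers=2,
        random_seed=12,
    )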
| 2.3125 | 2 |
saleor/core/jwt.py | autobotasia/saleor | 1 | 4320 | <filename>saleor/core/jwt.py
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import graphene
import jwt
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from ..account.models import User
from ..app.models import App
from .permissions import (
get_permission_names,
get_permissions_from_codenames,
get_permissions_from_names,
)
JWT_ALGORITHM = "HS256"
SALEOR_AUTH_HEADER = "HTTP_AUTHORIZATION_BEARER"
DEFAULT_AUTH_HEADER = "HTTP_AUTHORIZATION"
AUTH_HEADER_PREFIXES = ["JWT", "BEARER"]
JWT_ACCESS_TYPE = "access"
JWT_REFRESH_TYPE = "refresh"
JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"
PERMISSIONS_FIELD = "permissions"
JWT_SALEOR_OWNER_NAME = "saleor"
JWT_OWNER_FIELD = "owner"
def jwt_base_payload(
exp_delta: Optional[timedelta], token_owner: str
) -> Dict[str, Any]:
utc_now = datetime.utcnow()
payload = {"iat": utc_now, JWT_OWNER_FIELD: token_owner}
if exp_delta:
payload["exp"] = utc_now + exp_delta
return payload
def jwt_user_payload(
user: User,
token_type: str,
exp_delta: Optional[timedelta],
additional_payload: Optional[Dict[str, Any]] = None,
token_owner: str = JWT_SALEOR_OWNER_NAME,
) -> Dict[str, Any]:
payload = jwt_base_payload(exp_delta, token_owner)
payload.update(
{
"token": user.jwt_token_key,
"email": user.email,
"type": token_type,
"user_id": graphene.Node.to_global_id("User", user.id),
"is_staff": user.is_staff,
"is_supplier": user.is_supplier,
}
)
if additional_payload:
payload.update(additional_payload)
return payload
def jwt_encode(payload: Dict[str, Any]) -> str:
return jwt.encode(
payload,
settings.SECRET_KEY, # type: ignore
JWT_ALGORITHM,
)
def jwt_decode_with_exception_handler(
token: str, verify_expiration=settings.JWT_EXPIRE
) -> Optional[Dict[str, Any]]:
try:
return jwt_decode(token, verify_expiration=verify_expiration)
except jwt.PyJWTError:
return None
def jwt_decode(token: str, verify_expiration=settings.JWT_EXPIRE) -> Dict[str, Any]:
return jwt.decode(
token,
settings.SECRET_KEY, # type: ignore
algorithms=[JWT_ALGORITHM],
options={"verify_exp": verify_expiration},
)
def create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:
payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))
return jwt_encode(payload)
def create_access_token(
user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
payload = jwt_user_payload(
user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload
)
return jwt_encode(payload)
def create_refresh_token(
user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
payload = jwt_user_payload(
user,
JWT_REFRESH_TYPE,
settings.JWT_TTL_REFRESH,
additional_payload,
)
return jwt_encode(payload)
def get_token_from_request(request: WSGIRequest) -> Optional[str]:
auth_token = request.META.get(SALEOR_AUTH_HEADER)
if not auth_token:
auth = request.META.get(DEFAULT_AUTH_HEADER, "").split(maxsplit=1)
if len(auth) == 2 and auth[0].upper() in AUTH_HEADER_PREFIXES:
auth_token = auth[1]
return auth_token
def get_user_from_payload(payload: Dict[str, Any]) -> Optional[User]:
user = User.objects.filter(email=payload["email"], is_active=True).first()
user_jwt_token = payload.get("token")
if not user_jwt_token or not user:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
if user.jwt_token_key != user_jwt_token:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
return user
def is_saleor_token(token: str) -> bool:
"""Confirm that token was generated by Saleor not by plugin."""
try:
payload = jwt.decode(token, options={"verify_signature": False})
except jwt.PyJWTError:
return False
owner = payload.get(JWT_OWNER_FIELD)
if not owner or owner != JWT_SALEOR_OWNER_NAME:
return False
return True
def get_user_from_access_token(token: str) -> Optional[User]:
if not is_saleor_token(token):
return None
payload = jwt_decode(token)
return get_user_from_access_payload(payload)
def get_user_from_access_payload(payload: dict) -> Optional[User]:
jwt_type = payload.get("type")
if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
permissions = payload.get(PERMISSIONS_FIELD, None)
user = get_user_from_payload(payload)
if user and permissions is not None:
token_permissions = get_permissions_from_names(permissions)
token_codenames = [perm.codename for perm in token_permissions]
user.effective_permissions = get_permissions_from_codenames(token_codenames)
user.is_staff = True if user.effective_permissions else False
return user
def create_access_token_for_app(app: "App", user: "User"):
"""Create access token for app.
App can use user jwt token to proceed given operation on the Saleor side.
The token which can be used by App has additional field defining the permissions
assigned to it. The permissions set is the intersection of user permissions and
app permissions.
"""
app_permissions = app.permissions.all()
app_permission_enums = get_permission_names(app_permissions)
permissions = user.effective_permissions
user_permission_enums = get_permission_names(permissions)
app_id = graphene.Node.to_global_id("App", app.id)
additional_payload = {
"app": app_id,
PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
}
payload = jwt_user_payload(
user,
JWT_THIRDPARTY_ACCESS_TYPE,
exp_delta=settings.JWT_TTL_APP_ACCESS,
additional_payload=additional_payload,
)
return jwt_encode(payload)
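# Hedged usage sketch: a middleware-style helper showing how the functions above are
# meant to compose; error handling for expired/invalid tokens is deliberately omitted.
def _example_user_from_request(request: WSGIRequest) -> Optional[User]:
    token = get_token_from_request(request)
    if not token or not is_saleor_token(token):
        return None
    return get_user_from_access_payload(jwt_decode(token))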
| 1.992188 | 2 |
locust/configuration.py | pancaprima/locust | 1 | 4321 | <filename>locust/configuration.py
import os, json, logging, jsonpath_rw_ext, jsonpath_rw
from jsonpath_rw import jsonpath, parse
from . import events
from ast import literal_eval
from flask import make_response
logger = logging.getLogger(__name__)
CONFIG_PATH = '/tests/settings/config.json'
class ClientConfiguration:
"""
This class is a handler for data configuration with JSON data structure.
"""
def __init__(self):
self.config_data = None
def read_json(self, path=None):
"""
Will get the data of configuration as JSON.
It reads configuration file once.
"""
if self.config_data is None:
if path is None:
path = CONFIG_PATH
else :
if path.startswith('./') :
path = path[1:]
elif not path.startswith('/'):
path = '/%s' % (path)
try:
with open((os.environ['PYTHONPATH'].split(os.pathsep))[-1] + path, "r") as data_file:
self.config_data = json.load(data_file)
except Exception as err:
logger.info(err)
                self.config_data = {}
return self.config_data
def update_json_config(self, json_added, json_path, options, list_column, config_text):
"""
Write JSON file configuration
"""
data = literal_eval(config_text)
if(options != "replace"):
json_target = jsonpath_rw_ext.match(json_path, data)
if isinstance(json_target[0], dict):
if len(list_column)==1:
json_target[0][list_column[0]] = json_added
json_final = json_target[0]
else:
return False, json.dumps(data, indent=4)
else:
for json_target_value in json_target[0]:
json_added.append(json_target_value)
json_final = json_added
else:
json_final = json_added
jsonpath_expr = parse(json_path)
matches = jsonpath_expr.find(data)
if len(matches)==0:
return make_response(json.dumps({'success':False, 'message':'JSON path not found.'}))
for match in matches:
data = ClientConfiguration.update_json(data, ClientConfiguration.get_path(match), json_final)
return make_response(json.dumps({'success':True, 'data':json.dumps(data, indent=4)}))
@classmethod
def get_path(self, match):
"""
Return an iterator based upon MATCH.PATH. Each item is a path component,
start from outer most item.
"""
if match.context is not None:
for path_element in ClientConfiguration.get_path(match.context):
yield path_element
yield str(match.path)
@classmethod
def update_json(self, json, path, value):
"""
Update JSON dictionary PATH with VALUE. Return updated JSON
"""
try:
first = next(path)
# check if item is an array
if (first.startswith('[') and first.endswith(']')) or (first.startswith('{') and first.endswith('}')):
try:
first = int(first[1:-1])
except ValueError:
pass
json[first] = ClientConfiguration.update_json(json[first], path, value)
return json
except StopIteration:
return value
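# Hedged usage sketch (illustrative): update_json_config builds a Flask response, so a
# real call has to happen inside a Flask request/application context. The path and
# payload below are assumptions, not values from any real test plan.
def _example_replace_setting():
    cfg = ClientConfiguration()
    return cfg.update_json_config(
        json_added={"timeout": 30},
        json_path="settings",
        options="replace",
        list_column=[],
        config_text='{"settings": {"timeout": 10}}',
    )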
| 2.5625 | 3 |
data/migrations/0023_discardaction_answers.py | SIXMON/peps | 5 | 4322 | <reponame>SIXMON/peps
# Generated by Django 2.2.4 on 2019-11-14 16:48
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0022_discardaction'),
]
operations = [
migrations.AddField(
model_name='discardaction',
name='answers',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
]
| 1.703125 | 2 |
app/models.py | juanitodread/pitaya-falcon | 0 | 4323 | from json import JSONEncoder
from time import time
class Jsonable:
"""Abstract class to standardize the toJson method to be implemented by any class that wants to be
serialized to JSON"""
def toJson(self):
"""Abstract method"""
raise NotImplementedError('You should implement this method in your classes.')
class CommonMessage(Jsonable):
def __init__(self):
self.client = Client()
self.emitter = Emitter()
self.type = ""
self.body = ""
self.tags = ["music", "culture", "food"]
def toJson(self):
return dict(client=self.client, emitter=self.emitter, type=self.type, body=self.body, tags=self.tags)
class Client(Jsonable):
def __init__(self):
self.id = ""
self.name = ""
self.time = int(round(time() * 1000))
def toJson(self):
return dict(id=self.id, name=self.name, time=self.time)
class Emitter(Jsonable):
def __init__(self):
self.id = ""
def toJson(self):
return dict(id=self.id)
class ComplexJsonEncoder(JSONEncoder):
"""Basic JSON encoder for 'complex (nested)' Python objects."""
def default(self, o):
if hasattr(o, 'toJson'):
return o.toJson()
else:
return JSONEncoder.default(self, o)
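# Hedged usage sketch: the encoder is meant to be handed to json.dumps via `cls`,
# which lets the nested Client and Emitter objects serialize through toJson().
def _example_serialize_message():
    from json import dumps

    message = CommonMessage()
    message.type = "greeting"
    message.body = "hello"
    return dumps(message, cls=ComplexJsonEncoder)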
| 3.546875 | 4 |
compute_pi.py | jakobkogler/pi_memorize | 0 | 4324 | <gh_stars>0
"""Compute pi."""
from decimal import Decimal, getcontext
import argparse
import itertools
class ComputePi:
"""Compute pi to a specific precision using multiple algorithms."""
@staticmethod
def BBP(precision):
"""Compute pi using the Bailey-Borwein-Plouffe formula."""
getcontext().prec = precision + 20
pi = Decimal(0)
for k in itertools.count():
term = (Decimal(4)/(8*k+1) - Decimal(2)/(8*k+4) - Decimal(1)/(8*k+5) - Decimal(1)/(8*k+6))
term /= Decimal(16)**k
pi += term
if term < Decimal(10)**(-precision-10):
break
pi = str(pi)[:-19]
return pi
@staticmethod
def arctan_euler(x, one=1000000):
"""Calculate arctan(1/x) using euler's accelerated formula.
Based on http://www.craig-wood.com/nick/articles/pi-machin/"""
x_squared = x * x
x_squared_plus_1 = x_squared + 1
term = (x * one) // x_squared_plus_1
total = term
two_n = 2
while 1:
divisor = (two_n+1) * x_squared_plus_1
term *= two_n
term += divisor // 2 # round the division
term = term // divisor
if term == 0:
break
total += term
two_n += 2
return total
@staticmethod
def machin_euler(digits):
"""Compute pi using Machin's formula.
Based on http://www.craig-wood.com/nick/articles/pi-machin/"""
one = 10**(digits + 20)
pi = 4*(4*ComputePi.arctan_euler(5, one) - ComputePi.arctan_euler(239, one))
pi //= 10**20
return '3.{}'.format(str(pi)[1:])
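# Hedged sanity check (illustrative): the two independent algorithms should agree on
# their leading digits, which is a cheap way to validate either implementation.
def _example_cross_check(digits=30):
    bbp = ComputePi.BBP(digits)
    machin = ComputePi.machin_euler(digits)
    return bbp[:digits] == machin[:digits]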
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates pi.')
parser.add_argument('--precision', type=int, default=100,
help='The desired precision of pi (default: 100 digits)')
args = parser.parse_args()
pi_computer = ComputePi()
print(pi_computer.machin_euler(args.precision))
| 3.3125 | 3 |
scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | 0 | 4325 | #!/usr/bin/python3
import time
from brownie import (
DataTypes,
TransparentUpgradeableProxy,
ProxyAdmin,
config,
network,
Contract,
)
from scripts.helpful_scripts import get_account, encode_function_data
def main():
account = get_account()
print(config["networks"][network.show_active()])
print(f"Deploying to {network.show_active()}")
data_types = DataTypes.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
# Optional, deploy the ProxyAdmin and use that as the admin contract
proxy_admin = ProxyAdmin.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
# If we want an intializer function we can add
# `initializer=box.store, 1`
# to simulate the initializer being the `store` function
# with a `newValue` of 1
# data_types_encoded_initializer_function = encode_function_data(data_types.setDataTypes)
data_types_encoded_initializer_function = encode_function_data(
data_types.setDataTypes, 10
)
proxy = TransparentUpgradeableProxy.deploy(
data_types.address,
proxy_admin.address,
data_types_encoded_initializer_function,
# gas limit removed fort an issue not very clear
# {"from": account, "gas_limit": 100000000000},
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
print(f"Proxy deployed to {proxy} ! You can now upgrade it to dataTypesV2!")
proxy_data_types = Contract.from_abi("DataTypes", proxy.address, DataTypes.abi)
| 2.21875 | 2 |
modules/BidirectionalLSTM.py | omni-us/pytorch-retinanet | 12 | 4326 | <filename>modules/BidirectionalLSTM.py
import torch.nn as nn
class BidirectionalLSTM(nn.Module):
# Module to extract BLSTM features from convolutional feature map
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
self.embedding = nn.Linear(nHidden * 2, nOut)
self.rnn.cuda()
self.embedding.cuda()
def forward(self, input):
recurrent, _ = self.rnn(input)
T, b, h = recurrent.size()
t_rec = recurrent.view(T * b, h)
output = self.embedding(t_rec) # [T * b, nOut]
output = output.view(T, b, -1)
return output
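# Hedged usage sketch: this module calls .cuda() on itself in __init__, so both the
# module and its inputs must live on the GPU; inputs are shaped (seq_len, batch, nIn).
# The sizes below are illustrative only.
def _example_blstm_forward():
    import torch

    blstm = BidirectionalLSTM(nIn=256, nHidden=256, nOut=37)
    features = torch.randn(26, 4, 256).cuda()  # (T, b, nIn)
    return blstm(features)                     # (T, b, nOut) == (26, 4, 37)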
| 2.875 | 3 |
release/stubs.min/System/Windows/Forms/__init___parts/PaintEventArgs.py | tranconbv/ironpython-stubs | 0 | 4327 | class PaintEventArgs(EventArgs,IDisposable):
"""
Provides data for the System.Windows.Forms.Control.Paint event.
PaintEventArgs(graphics: Graphics,clipRect: Rectangle)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return PaintEventArgs()
def Dispose(self):
"""
Dispose(self: PaintEventArgs)
Releases all resources used by the System.Windows.Forms.PaintEventArgs.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,graphics,clipRect):
""" __new__(cls: type,graphics: Graphics,clipRect: Rectangle) """
pass
ClipRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the rectangle in which to paint.
Get: ClipRectangle(self: PaintEventArgs) -> Rectangle
"""
Graphics=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the graphics used to paint.
Get: Graphics(self: PaintEventArgs) -> Graphics
"""
| 2.109375 | 2 |
main.py | JaekwangCha/my_pytorch_templet | 0 | 4328 | # written by <NAME>
# version 0.1
# ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== #
from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model
# ================== TRAINING SETTINGS ================== #
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce')
parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use')
parser.add_argument('--model', default='CNN', type=str, help='model to use')
parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)')
parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu', default=0, type=str, help='GPU-id for GPU to use')
parser.add_argument('--multi_gpu', default=0, type=str, help='GPU-ids for multi-GPU usage')
parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector')
parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights')
parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs')
# data setting
parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data')
parser.add_argument('--transform', default='default', type=str, help='choose the data transform type')
# training parameter setting
parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration')
parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch')
parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch')
# optimizer & scheduler setting
parser.add_argument('--lr', default=0.03, type=float, help='training learning rate')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select')
parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select')
opt = parser.parse_args()
# ===================== IMPORT PYTORCH LIBRARIES ================== #
import torch
from torch.utils.data import DataLoader
torch.manual_seed(opt.seed)
# ================== GPU SETTINGS ================== #
def gpu_setup(opt):
use_cuda = not opt.no_cuda and torch.cuda.is_available()
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
if opt.multi_gpu != 0:
print()
print('Activating multi-gpu training mode')
print(opt.multi_gpu)
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.multi_gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
print()
print('Activating single-gpu training mode')
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using gpu number ' + str(opt.gpu))
return use_cuda
# ======================= MAIN SCRIPT ============================= #
def main(opt):
use_cuda = gpu_setup(opt)
dataset_train, dataset_validation = load_dataset(opt, train=True)
print('training data size: {}'.format(len(dataset_train)))
print('validation data size: {}'.format(len(dataset_validation)))
dataset_test = load_dataset(opt, train=False)
print('test data size: {}'.format(len(dataset_test)))
print()
kwargs = {'num_workers': opt.num_worker, 'pin_memory': opt.pin_memory} if use_cuda else {}
train_dataloader = DataLoader(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs)
validation_dataloader = DataLoader(dataset_validation, batch_size=opt.batch_size, shuffle=True, **kwargs)
test_dataloader = DataLoader(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs)
model = load_model(opt)
if opt.multi_gpu != 0:
model = torch.nn.DataParallel(model)
model.to(opt.device)
train(opt, model, train_dataloader, validation_dataloader)
test(opt, model, test_dataloader)
if __name__ == '__main__':
main(opt)
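# Illustrative only: example command lines for this script. The flag names come from
# the argparse setup above; the dataset/model values are just the parser defaults and
# the paths are placeholders, not files shipped with this template.
#
#   python main.py --dataset mnist --model CNN --n_epoch 10 --batch_size 32
#   python main.py --no_cuda --save_model --save_path ./weights --log_path ./Logs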
| 2.140625 | 2 |
test/core/024-sc4-gridftp-http/Rosetta.py | ahnitz/pegasus | 127 | 4329 | <reponame>ahnitz/pegasus
#!/usr/bin/env python3
import logging
import sys
import subprocess
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "024-sc4-gridftp-http-" + datetime.now().strftime("%s")
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
try:
Path.mkdir(WORK_DIR)
except FileExistsError:
pass
# --- Configuration ------------------------------------------------------------
print("Generating pegasus.properties at: {}".format(TOP_DIR / "pegasus.properties"))
props = Properties()
props["pegasus.dir.useTimestamp"] = "true"
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.data.configuration"] = "nonsharedfs"
with (TOP_DIR / "pegasus.properties").open(mode="w") as f:
props.write(f)
# --- Sites --------------------------------------------------------------------
print("Generating site catalog at: sites.yml")
LOCAL = "local"
CONDOR_POOL = "condor_pool"  # must match the site name declared in sites.yml below
STAGING_SITE = "staging_site"
try:
    pegasus_config = subprocess.run(
        ["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
except FileNotFoundError:
    print("Unable to find pegasus-config")
    sys.exit(1)
assert pegasus_config.returncode == 0
PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip()
sites = """
pegasus: "5.0"
sites:
-
name: "condor_pool"
arch: "x86_64"
os.type: "linux"
profiles:
condor:
universe: "vanilla"
pegasus:
style: "condor"
-
name: "staging_site"
arch: "x86_64"
os.type: "linux"
directories:
-
type: "sharedScratch"
path: "/lizard/scratch-90-days/http-scratch/ptesting"
fileServers:
-
operation: "get"
url: "http://workflow.isi.edu/shared-scratch/ptesting"
-
operation: "put"
url: "gsiftp://workflow.isi.edu/lizard/scratch-90-days/http-scratch/ptesting"
-
name: "local"
arch: "x86_64"
os.type: "linux"
os.release: "rhel"
os.version: "7"
directories:
-
type: "sharedScratch"
path: "{work_dir}/scratch"
fileServers:
-
operation: "all"
url: "file://{work_dir}/scratch"
-
type: "localStorage"
path: "{work_dir}/outputs"
fileServers:
-
operation: "all"
url: "file://{work_dir}/outputs"
profiles:
env:
PEGASUS_BIN_DIR: "{pegasus_bin_dir}"
""".format(
work_dir=str(WORK_DIR), pegasus_bin_dir=PEGASUS_BIN_DIR
)
with (TOP_DIR / "sites.yml").open(mode="w") as f:
f.write(sites)
# --- Transformations ----------------------------------------------------------
rosetta_exe = Transformation(
"rosetta.exe",
arch=Arch.X86_64,
os_type=OS.LINUX,
site="local",
pfn="file://" + str(TOP_DIR / "rosetta.exe"),
is_stageable=True,
).add_pegasus_profile(clusters_size=3)
tc = TransformationCatalog().add_transformations(rosetta_exe)
# --- Replicas & Workflow ------------------------------------------------------
rc = ReplicaCatalog()
# add all files in minirosetta_database
inputs = list()
def get_files(d: Path) -> None:
for p in d.iterdir():
if p.is_file():
f = File(str(p))
inputs.append(f)
rc.add_replica(LOCAL, str(p), str(p.resolve()))
else:
get_files(p)
get_files(Path("minirosetta_database"))
f1 = File("design.resfile")
inputs.append(f1)
rc.add_replica(LOCAL, f1, str(Path("design.resfile").resolve()))
f2 = File("repack.resfile")
inputs.append(f2)
rc.add_replica(LOCAL, f2, str(Path("repack.resfile").resolve()))
wf = Workflow("rosetta")
pdb_files = list(Path("pdbs").iterdir())
# plan jobs for (at most) the first 10 PDB inputs
for current_file in pdb_files[:10]:
    if current_file.is_file():
job = (
Job(rosetta_exe, _id=current_file.name.replace(".pdb", ""))
.add_inputs(File(current_file.name), *inputs)
.add_outputs(File(current_file.name + ".score.sc"), register_replica=True)
.add_args(
"-in:file:s",
current_file.name,
"-out:prefix " + current_file.name + ".",
"-database ./minirosetta_database",
"-linmem_ig 10",
"-nstruct 1",
"-pert_num 2",
"-inner_num 1",
"-jd2::ntrials 1",
)
)
rc.add_replica("local", current_file.name, str(current_file.resolve()))
wf.add_jobs(job)
# write rc to separate file for registration jobs
with (TOP_DIR / "replicas.yml").open("w") as f:
rc.write(f)
wf.add_transformation_catalog(tc)
try:
wf.plan(
dir=str(WORK_DIR),
verbose=5,
sites=[CONDOR_POOL],
staging_sites={CONDOR_POOL: STAGING_SITE},
)
except PegasusClientError as e:
print(e.output)
| 1.921875 | 2 |
tests/nls_smoother_test.py | sisl/CEEM | 5 | 4330 | <reponame>sisl/CEEM
import torch
from ceem.opt_criteria import *
from ceem.systems import LorenzAttractor
from ceem.dynamics import *
from ceem.smoother import *
from ceem import utils
def test_smoother():
utils.set_rng_seed(1)
torch.set_default_dtype(torch.float64)
sigma = torch.tensor([10.])
rho = torch.tensor([28.])
beta = torch.tensor([8. / 3.])
C = torch.randn(2, 3)
dt = 0.04
sys = LorenzAttractor(sigma, rho, beta, C, dt, method='midpoint')
B = 1
T = 200
xs = [torch.randn(B, 1, 3)]
for t in range(T - 1):
xs.append(sys.step(torch.tensor([0.] * B), xs[-1]))
x = torch.cat(xs, dim=1).detach()
x.requires_grad = True
y = sys.observe(0., x).detach()
# y += torch.rand_like(y) * 0.01
t = torch.stack([torch.arange(T), torch.arange(T)]).to(torch.get_default_dtype())
x0 = torch.zeros_like(x)
obscrit = GaussianObservationCriterion(torch.ones(2), t, y)
dyncrit = GaussianDynamicsCriterion(torch.ones(3), t)
# Test GroupSOSCriterion
crit = GroupSOSCriterion([obscrit, dyncrit])
xsm, metrics = NLSsmoother(x0, crit, sys, solver_kwargs={'verbose': 2, 'tr_rho': 0.})
err = float((xsm - x).norm())
assert err < 1e-8, 'Smoothing Error: %.3e' % err
print('Passed.')
# Test BlockSparseGroupSOSCriterion
crit = BlockSparseGroupSOSCriterion([obscrit, dyncrit])
xsm, metrics = NLSsmoother(torch.zeros_like(x), crit, sys)
err = float((xsm - x).norm())
assert err < 1e-8, 'Smoothing Error: %.3e' % err
print('Passed.')
if __name__ == '__main__':
test_smoother()
| 2.03125 | 2 |
qiskit/visualization/pulse_v2/device_info.py | godspeed5/qiskit-terra | 15 | 4331 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""A collection of backend information formatted to generate drawing data.
An instance of this class will be provided to generator functions. The module provides an abstract
class :py:class:`DrawerBackendInfo` with the methods necessary to generate drawing objects.
Because the data structure of the backend class may depend on the provider, this abstract
class has an abstract factory method `create_from_backend`. Each subclass should provide
a factory method that conforms to the associated provider. By default we provide the
:py:class:`OpenPulseBackendInfo` class, whose factory method takes backends
satisfying the OpenPulse specification [1].
This class can also be initialized without the factory method by manually specifying the
required information. This may be convenient for visualizing a pulse program for a simulator
backend that only has device Hamiltonian information. This requires two mapping objects
for channel/qubit and channel/frequency along with the system cycle time.
If that information is not provided, this class will be initialized with a set of
empty data and the drawer illustrates a pulse program without any specific information.
Reference:
    - [1] Qiskit Backend Specifications for OpenQASM and OpenPulse Experiments,
        https://arxiv.org/abs/1809.03452
"""
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Dict, List, Union, Optional
from qiskit import pulse
from qiskit.providers import BaseBackend, BackendConfigurationError
class DrawerBackendInfo(ABC):
"""Backend information to be used for the drawing data generation."""
def __init__(self,
name: Optional[str] = None,
dt: Optional[float] = None,
channel_frequency_map: Optional[Dict[pulse.channels.Channel, float]] = None,
qubit_channel_map: Optional[Dict[int, List[pulse.channels.Channel]]] = None):
"""Create new backend information.
Args:
name: Name of the backend.
dt: System cycle time.
channel_frequency_map: Mapping of channel and associated frequency.
qubit_channel_map: Mapping of qubit and associated channels.
"""
self.backend_name = name or 'no-backend'
self._dt = dt
self._chan_freq_map = channel_frequency_map or dict()
self._qubit_channel_map = qubit_channel_map or dict()
@classmethod
@abstractmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
"""
raise NotImplementedError
@property
def dt(self):
"""Return cycle time."""
return self._dt
def get_qubit_index(self, chan: pulse.channels.Channel) -> Union[int, None]:
"""Get associated qubit index of given channel object."""
for qind, chans in self._qubit_channel_map.items():
if chan in chans:
return qind
return chan.index
def get_channel_frequency(self, chan: pulse.channels.Channel) -> Union[float, None]:
"""Get frequency of given channel object."""
return self._chan_freq_map.get(chan, None)
class OpenPulseBackendInfo(DrawerBackendInfo):
"""Drawing information of backend that conforms to OpenPulse specification."""
@classmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
Returns:
OpenPulseBackendInfo: New configured instance.
"""
configuration = backend.configuration()
defaults = backend.defaults()
# load name
name = backend.name()
# load cycle time
dt = configuration.dt
# load frequencies
chan_freqs = dict()
chan_freqs.update({pulse.DriveChannel(qind): freq
for qind, freq in enumerate(defaults.qubit_freq_est)})
chan_freqs.update({pulse.MeasureChannel(qind): freq
for qind, freq in enumerate(defaults.meas_freq_est)})
for qind, u_lo_mappers in enumerate(configuration.u_channel_lo):
temp_val = .0 + .0j
for u_lo_mapper in u_lo_mappers:
temp_val += defaults.qubit_freq_est[u_lo_mapper.q] * complex(*u_lo_mapper.scale)
chan_freqs[pulse.ControlChannel(qind)] = temp_val.real
# load qubit channel mapping
qubit_channel_map = defaultdict(list)
for qind in range(configuration.n_qubits):
qubit_channel_map[qind].append(configuration.drive(qubit=qind))
qubit_channel_map[qind].append(configuration.measure(qubit=qind))
for tind in range(configuration.n_qubits):
try:
qubit_channel_map[qind].extend(configuration.control(qubits=(qind, tind)))
except BackendConfigurationError:
pass
return OpenPulseBackendInfo(name=name,
dt=dt,
channel_frequency_map=chan_freqs,
qubit_channel_map=qubit_channel_map)
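# Illustrative only: a minimal sketch of initializing the backend info manually, i.e.
# without `create_from_backend`, as described in the module docstring. The backend
# name, dt and frequency values below are placeholders rather than real hardware data.
if __name__ == '__main__':
    _drive = pulse.DriveChannel(0)
    _meas = pulse.MeasureChannel(0)
    _info = OpenPulseBackendInfo(
        name='fake_backend',
        dt=2.2e-10,
        channel_frequency_map={_drive: 5.0e9, _meas: 7.0e9},
        qubit_channel_map={0: [_drive, _meas]})
    print(_info.get_qubit_index(_meas))         # -> 0
    print(_info.get_channel_frequency(_drive))  # -> 5000000000.0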
| 2.265625 | 2 |
django_gotolong/mfund/views.py | ParikhKadam/gotolong | 15 | 4332 | # Create your views here.
from .models import Mfund
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.tools import make_subplots
from django.db.models import Q
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views import View
from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min
from django.db.models.functions import Trim, Lower, Round
import pandas as pd
import csv, io
import openpyxl
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update
from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf
def Mfund_url():
return "unused-mfund-refresh-url"
class MfundListView(ListView):
model = Mfund
# if pagination is desired
# paginate_by = 300
# filter_backends = [filters.OrderingFilter,]
# ordering_fields = ['sno', 'nse_symbol']
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id)
return queryset
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MfundListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Amount(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id).order_by('-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_amc', 'mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC_Amount(ListView):
model = Mfund
def get_queryset(self):
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_amc').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
        print('AMC-wise scheme totals: ', self.queryset)
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_amc']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundListView_Category(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Subcat(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Reco(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_research_reco', '-mf_rating')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_SubcatAmount(ListView):
model = Mfund
def get_queryset(self):
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_subcat').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_subcat']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundRefreshView(View):
debug_level = 1
def get(self, request):
self.mfund_refresh(request)
return HttpResponseRedirect(reverse("mfund-list"))
def __init__(self):
super(MfundRefreshView, self).__init__()
def mfund_refresh(self, request):
debug_level = 1
# declaring template
# first delete all existing mfund objects
Mfund.objects.all().filter(mf_user_id=request.user.id).delete()
max_id_instances = Mfund.objects.aggregate(max_id=Max('mf_id'))
max_mf_id = max_id_instances['max_id']
print('DS: found max id ', max_mf_id)
if max_mf_id is None:
max_mf_id = 0
print('max_mf_id ', max_mf_id)
unique_id = max_mf_id
for brec in BrokerIcidirMf.objects.all().filter(bim_user_id=request.user.id):
unique_id += 1
print(brec.bim_amc, brec.bim_name, brec.bim_category, brec.bim_subcat)
print(brec.bim_rating, brec.bim_units, brec.bim_cost_value, brec.bim_nav_value)
print(brec.bim_research_reco)
# skip 0 units
if int(float(brec.bim_units)) != 0:
_, created = Mfund.objects.update_or_create(
mf_id=unique_id,
mf_user_id=request.user.id,
mf_broker='icidir',
mf_amc=brec.bim_amc,
mf_name=brec.bim_name,
mf_category=brec.bim_category,
mf_subcat=brec.bim_subcat,
mf_rating=brec.bim_rating,
mf_cost_value=brec.bim_cost_value,
mf_nav_value=brec.bim_nav_value,
mf_research_reco=brec.bim_research_reco
)
# breakpoint()
# import pdb
# pdb.set_trace()
# Updated Gfundareco objects
lastrefd_update("mfund")
| 2.21875 | 2 |
m3u8.py | akria00/m3u8-Downloader-master | 2 | 4333 | #coding: utf-8
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
import gevent
import requests
import urllib
import os
import time
import re
import ssl
class Downloader:
def __init__(self, pool_size, retry=3):
self.pool = Pool(pool_size)
self.session = self._get_http_session(pool_size, pool_size, retry)
self.retry = retry
self.dir = ''
self.succed = {}
self.failed = []
self.ts_total = 0
def _get_http_session(self, pool_connections, pool_maxsize, max_retries):
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=pool_connections, pool_maxsize=pool_maxsize, max_retries=max_retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
    def run(self, m3u8_url, dir='', moreTs=False):
self.dir = dir
if self.dir and not os.path.isdir(self.dir):
os.makedirs(self.dir)
r = self.session.get(m3u8_url, timeout=10)
if r.ok:
body = r.content
if body:
ssl._create_default_https_context = ssl._create_unverified_context
ts_list = [urllib.parse.urljoin(m3u8_url, n.strip()) for n in str(body, encoding = "utf8").split('\n') if n and not n.startswith("#")]
if moreTs:
ts_list = self.getMoreTsList(ts_list)
ts_list = list(zip(ts_list, [n for n in range(len(list(ts_list)))]))
if ts_list:
self.ts_total = len(ts_list)
print(self.ts_total)
g1 = gevent.spawn(self._join_file)
self._download(ts_list)
g1.join()
else:
print( r.status_code)
def _download(self, ts_list):
self.pool.map(self._worker, ts_list)
if self.failed:
ts_list = self.failed
self.failed = []
self._download(ts_list)
def _worker(self, ts_tuple):
url = ts_tuple[0]
index = ts_tuple[1]
retry = self.retry
while retry:
try:
r = self.session.get(url, timeout=20)
if r.ok:
file_name = url.split('/')[-1].split('?')[0]
print( file_name)
with open(os.path.join(self.dir, file_name), 'wb') as f:
f.write(r.content)
self.succed[index] = file_name
return
except:
retry -= 1
print ('[FAIL]%s' % url)
self.failed.append((url, index))
def _join_file(self):
index = 0
outfile = ''
while index < self.ts_total:
file_name = self.succed.get(index, '')
if file_name:
infile = open(os.path.join(self.dir, file_name), 'rb')
if not outfile:
outfile = open(os.path.join(self.dir, file_name.split('.')[0]+'_all.'+file_name.split('.')[-1]), 'wb')
outfile.write(infile.read())
infile.close()
os.remove(os.path.join(self.dir, file_name))
index += 1
else:
time.sleep(1)
if outfile:
outfile.close()
    def getMoreTsList(self, ts_list):
        """Probe for .ts segments that exist on the server beyond the last one
        listed in the playlist, and append their URLs to ts_list.
        Starting from the index after the last listed segment, the loop expands
        the upper bound in steps of roughly 100 while requests succeed, then
        bisects between the last known-good and first failing index to find the
        final existing segment."""
headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'upgrade-insecure-requests':1,
'scheme':'https'
}
retry = self.retry
isOk = False
lastTs = ts_list[-1]
pattern = re.compile(r'(\d+\.?\d)\.ts')
tsNum = '{:0>3}'.format(int(pattern.findall(lastTs)[0]) + 1 )
nextTs = re.sub(pattern,str(tsNum),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
l = r = int(tsNum)
maxTs = 0
while retry or isOk:
try:
isOk = urllib.request.urlopen(req).status==200
if isOk:
retry = 3
l = r + 1
r = l + 100 if maxTs < r else maxTs - int((maxTs-l)/2)
nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
else:
r = r - int((r-l)/2)
except :
if int((r-l)/2) == 0:
for i in range(int(tsNum) , r):
ts_list.append(re.sub(pattern,'{:0>3}'.format(i),lastTs,1) + ".ts")
return ts_list
maxTs = r
r = r - int((r-l)/2)
nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts"
req = urllib.request.Request(url=nextTs,headers=headers,method='GET')
retry -= 1
isOk = False
return ts_list
if __name__ == '__main__':
downloader = Downloader(5)
downloader.run('https://www.xiaodianying.com/filets/2069/dp.m3u8', './video',True)
| 2.46875 | 2 |
buzzbox/restaurants/migrations/0002_restaurant_description.py | Danielvalev/kutiika | 0 | 4334 | <filename>buzzbox/restaurants/migrations/0002_restaurant_description.py
# Generated by Django 3.2.9 on 2021-12-06 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='restaurant',
name='description',
field=models.CharField(default='Description', max_length=255, verbose_name='Description'),
preserve_default=False,
),
]
| 1.78125 | 2 |
src/dsrlib/ui/utils.py | fraca7/dsremap | 8 | 4335 | <filename>src/dsrlib/ui/utils.py<gh_stars>1-10
#!/usr/bin/env python3
import os
import contextlib
from PyQt5 import QtCore, QtWidgets
from dsrlib.settings import Settings
class LayoutBuilder:
    """Helper that builds nested Qt layouts for a widget or main window via context managers."""
def __init__(self, target):
self.target = target
self._stack = []
@contextlib.contextmanager
def _layout(self, cls, *args, **kwargs):
layout = cls()
self._stack.append(layout)
try:
yield layout
finally:
self._pop(*args, **kwargs)
def _pop(self, *args, **kwargs):
layout = self._stack.pop()
if self._stack:
parent = self._stack[-1]
if isinstance(layout, QtWidgets.QSplitter):
parent.addWidget(layout)
else:
if isinstance(parent, QtWidgets.QSplitter):
container = QtWidgets.QWidget(parent)
container.setLayout(layout)
parent.addWidget(container)
else:
parent.addLayout(layout, *args, **kwargs)
elif isinstance(self.target, QtWidgets.QMainWindow):
if isinstance(layout, QtWidgets.QSplitter):
self.target.setCentralWidget(layout)
else:
container = QtWidgets.QWidget(self.target)
container.setLayout(layout)
self.target.setCentralWidget(container)
else:
if isinstance(layout, QtWidgets.QSplitter):
layout2 = QtWidgets.QHBoxLayout()
layout2.setContentsMargins(0, 0, 0, 0)
layout2.addWidget(layout)
self.target.setLayout(layout2)
else:
self.target.setLayout(layout)
@contextlib.contextmanager
def hbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QHBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
@contextlib.contextmanager
def vbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QVBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
def stack(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QStackedLayout, *args, **kwargs)
def form(self, *args, **kwargs):
class _FormLayout(QtWidgets.QFormLayout):
def addLayout(self, layout):
self.addRow(layout)
def addRow(self, label, widget=None): # pylint: disable=C0111
if isinstance(label, str):
label = QtWidgets.QLabel(label)
label.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
label.setAlignment(QtCore.Qt.AlignVCenter)
if widget is None:
super().addRow(label)
else:
super().addRow(label, widget)
return self._layout(_FormLayout, *args, **kwargs)
def split(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QSplitter, *args, **kwargs)
def getSaveFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'save_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
while True:
name, dummy = QtWidgets.QFileDialog.getSaveFileName(parent, _('Save'), path, '*.%s' % extension, options=QtWidgets.QFileDialog.DontConfirmOverwrite)
if not name:
return None
if not name.endswith('.%s' % extension):
name = '%s.%s' % (name, extension)
if os.path.exists(name):
resp = QtWidgets.QMessageBox.question(parent,
_('Overwrite file?'),
_('This file already exists. Overwrite?'),
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No|QtWidgets.QMessageBox.Cancel)
if resp == QtWidgets.QMessageBox.Yes:
settings.setValue(sname, os.path.dirname(name))
return name
if resp == QtWidgets.QMessageBox.No:
continue
return None
settings.setValue(sname, os.path.dirname(name))
return name
def getOpenFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'open_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
name, dummy = QtWidgets.QFileDialog.getOpenFileName(parent, _('Open file'), path, '*.%s' % extension if extension else '')
if name:
settings.setValue(sname, os.path.dirname(name))
return name
return None
class EnumComboBox(QtWidgets.QComboBox):
valueChanged = QtCore.pyqtSignal(object)
def __init__(self, *args, enum, value=None, **kwargs):
super().__init__(*args, **kwargs)
self._enum = enum
for item in enum:
self.addItem(enum.label(item), item)
if value is not None:
self.setValue(value)
self.currentIndexChanged.connect(self._emit)
def setValue(self, value):
for index, item in enumerate(self._enum):
if value == item:
self.setCurrentIndex(index)
break
else:
raise ValueError('Value "%s" not found in enum' % str(value))
def _emit(self, _):
self.valueChanged.emit(self.currentData())
| 2.15625 | 2 |
src/tiden/tidenrunner.py | mshonichev/example_pkg | 0 | 4336 | <reponame>mshonichev/example_pkg<filename>src/tiden/tidenrunner.py
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tidenpluginmanager import PluginManager
from .report.steps import step, InnerReportConfig, Step, add_attachment, AttachmentType
from .util import log_print, unix_path, call_method, create_case, kill_stalled_java, exec_time
from .result import Result
from .util import write_yaml_file, should_be_skipped
from .logger import *
from .runner import get_test_modules, get_long_path_len, get_class_from_module, known_issue_str
from .priority_decorator import get_priority_key
from .sshpool import SshPool
from uuid import uuid4
from traceback import format_exc
from .runner import set_configuration_options, get_configuration_representation, get_actual_configuration
from importlib import import_module
from os import path, mkdir
from time import time
from shutil import copyfile
from os.path import join, basename
from glob import glob
import traceback
class TidenTestPlan:
all_tests = None
skipped_tests = None
tests_to_execute = None
def __init__(self):
self.all_tests = {}
self.skipped_tests = []
self.tests_to_execute = []
def update(self, other):
self.all_tests.update(other.all_tests)
self.skipped_tests.extend(other.skipped_tests)
self.tests_to_execute.extend(other.tests_to_execute)
class TidenRunner:
# {
# '<suite_name>.<test_file_name>': {
# 'path': <full-path-to-test-file>,
# 'module_short_name': <test_file_name>,
# }
# }
modules = None
# Tiden config dictionary
config = None
# Tiden SshPool instance
ssh_pool = None
# Tiden PluginManager instance
pm = None
# longest length of the test name
long_path_len = 0
# instance of Result class
result = None
# current test module, a key to self.modules dictionary
test_module = None
# == TidenTestPlan for all modules:
total = None
# dictionary of TidenTestPlan indexed by test module name
test_plan = {}
# == for current test module:
# a short name of test module, e.g. test module file name without .py extension
module_short_name = None
# a name of module' test class
test_class_name = None
# instance of current module' test case class
test_class = None
# == for current test within module:
# test name, with all configuration options
current_test_name = None
# test method name only
current_test_method = None
def __init__(self, config, **kwargs):
if kwargs.get('modules', None) is not None:
self.modules = kwargs.get('modules')
else:
self.modules = get_test_modules(config, collect_only=kwargs.get('collect_only'))
self.config = config
self.long_path_len = get_long_path_len(self.modules)
xunit_path_var = None
if kwargs.get('xunit_path'):
xunit_path_var = kwargs.get('xunit_path')
elif config.get('var_dir') and config.get('xunit_file'):
xunit_path_var = join(config.get('var_dir'), config.get('xunit_file'))
self.result = Result(xunit_path=xunit_path_var)
self.ssh_pool: SshPool = kwargs.get('ssh_pool')
self.pm: PluginManager = kwargs.get('plugin_manager')
def collect_tests(self):
"""
Collect tests from all modules.
"""
log_print("*** Collecting tests ***", color='blue')
long_path_len = get_long_path_len(self.modules)
from tiden.sshpool import AbstractSshPool
self.ssh_pool = AbstractSshPool({'hosts': []})
def empty_init(self, config, ssh_pool):
self.config = config
self.ssh = ssh_pool
self.__prepare_session_vars()
for test_module in sorted(self.modules.keys()):
# cleanup instance vars
self.test_plan[test_module] = TidenTestPlan()
self.__prepare_module_vars(test_module, fake_init=empty_init)
self.__print_current_module_name()
test_method_names = sorted(list(self.gen_tests(self.test_class)))
self.create_test_module_attr_yaml(test_method_names)
self.collect_tests0(test_method_names)
self.total.update(self.test_plan[test_module])
log_print("*** Found %s tests. %s skipped. Going to 'run' %s tests ***" % (
len(self.total.all_tests),
len(self.total.skipped_tests),
len(self.total.tests_to_execute)
), color='blue')
test_cnt = 0
# Skipped tests do not hit collect report
# Now generate results for 'executed' tests
for test_module in sorted(self.modules.keys()):
self.__prepare_module_vars(test_module, fake_init=empty_init)
test_plan = self.test_plan[self.test_module]
for test_name in sorted(test_plan.tests_to_execute):
test_param = test_plan.all_tests[test_name]
self.__prepare_test_vars(**test_param)
test_cnt = test_cnt + 1
self.result.start_testcase(self.test_class, self.current_test_name)
self.__print_found_test_method_to_execute(long_path_len, test_cnt, test_module)
self.result.stop_testcase('pass')
def process_tests(self):
"""
Run all tests
:return:
"""
log_print("*** Tests ***", color='blue')
self.__prepare_session_vars()
# Check requirements for applications
for test_module in sorted(self.modules.keys()):
module = import_module("suites.%s" % test_module)
test_class_name = get_class_from_module(self.modules[test_module]['module_short_name'])
test_class = getattr(module, test_class_name)(self.config, self.ssh_pool)
if hasattr(test_class, 'check_requirements'):
test_class.check_requirements()
for test_module in sorted(self.modules.keys()):
# cleanup instance vars
self.test_plan[test_module] = TidenTestPlan()
self.__prepare_module_vars(test_module)
# find test methods:
if hasattr(self.test_class, '__configurations__'):
cfg_options = getattr(self.test_class, '__configuration_options__')
configuration = get_actual_configuration(self.config, cfg_options)
log_print("Configuration options for %s:\n%s" % (self.test_class.__class__.__name__,
'\n'.join([
'\t' + cfg_option_name + '=' + str(
configuration[i])
for i, cfg_option_name in enumerate(cfg_options)
])),
color='blue')
else:
cfg_options = None
configuration = None
test_method_names = list(self.gen_tests(self.test_class))
self.collect_tests1(test_method_names, common_test_param={
'configuration': configuration,
'cfg_options': cfg_options,
})
test_plan = self.test_plan[self.test_module]
if len(test_plan.skipped_tests) > 0:
self._skip_tests()
if len(test_plan.tests_to_execute) > 0:
tests_to_execute = sorted(test_plan.tests_to_execute, key=get_priority_key(self.test_class))
log_print("*** Found %s tests in %s. %s skipped. Going to run %s tests ***\n%s" % (
len(test_plan.all_tests), self.test_class_name, len(test_plan.skipped_tests),
len(test_plan.tests_to_execute),
'\n'.join([
test_plan.all_tests[test_name]['test_method_name']
for test_name in tests_to_execute
])),
color='blue')
# Execute module setup
setup_passed = self.__call_module_setup_teardown('setup')
if setup_passed:
self._run_tests(tests_to_execute)
# Execute module teardown
self.__call_module_setup_teardown('teardown')
# this is for correct fail in Jenkins
if not setup_passed:
exit(1)
def create_test_module_attr_yaml(self, test_method_names):
# create attr.yaml
for current_test_name in test_method_names:
test_function = getattr(self.test_class, current_test_name)
create_case(test_function)
def __prepare_session_vars(self):
self.test_plan = {}
self.total = TidenTestPlan()
def __prepare_module_vars(self, module_name, fake_init=None):
"""
Prepare per-module initialization of internal variables:
Expects self.test_module be set to proper full name of module under 'suites' directory
sets up
self.test_class_name
self.module_short_name
self.test_class - creates instance of test case class
resets
self.all_tests, self.tests_to_execute, self.skipped_tests
config
fills in config['rt'], config['rt']['remote']
Creates test module working local and remote directories.
Copies resources from suite directory to local test module working directory.
:param module_name: name of the module to prepare
:param fake_init: do not init module
:return:
"""
self.test_module = module_name
# fill new module vars
self.module_short_name = self.modules[self.test_module]['module_short_name']
test_module_dir = "%s/%s" % (self.config['suite_var_dir'], self.module_short_name)
remote_test_module_dir = "%s/%s" % (self.config['remote']['suite_var_dir'], self.module_short_name)
self.test_class_name = get_class_from_module(self.module_short_name)
# Update Tiden config
self.config['rt'] = {
'test_class': self.test_class_name,
'test_method': None,
'test_module': self.test_module,
'test_module_name': self.module_short_name,
'test_module_dir': test_module_dir,
'remote': {
'test_module_dir': remote_test_module_dir,
}
}
module = import_module("suites.%s" % self.test_module)
# used for collect_only
if fake_init:
self.test_class = getattr(module, self.test_class_name)
self.test_class.__init__ = fake_init
self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool)
else:
# for process tests - prepare test directory and resources
self.__create_test_module_directory(remote_test_module_dir, test_module_dir)
self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool)
if hasattr(self.test_class, 'tiden'):
self.__copy_resources_to_local_test_module_directory()
# Set ssh and config apps model classes
self.test_class.tiden.config = self.config
self.test_class.tiden.ssh = self.ssh_pool
self.test_class.config = self.config
self.test_class.ssh = self.ssh_pool
self._save_config()
def __prepare_test_vars(self, test_method_name=None, configuration=None, cfg_options=None, **kwargs):
if not test_method_name:
return
self.test_iteration = 1
self.current_test_method = test_method_name
if hasattr(self.test_class, '__configurations__'):
if cfg_options is None:
cfg_options = getattr(self.test_class, '__configuration_options__')
if configuration is None:
configuration = get_actual_configuration(self.config, cfg_options)
configuration_representation = get_configuration_representation(cfg_options, configuration)
self.current_test_name = self.current_test_method + configuration_representation
else:
self.current_test_name = self.current_test_method
def collect_test0(self):
# collect test params
test_params = {
'test_name': self.current_test_name,
}
test_function = getattr(self.test_class, self.current_test_method)
# first setup fixture
if hasattr(test_function, "__setup__"):
setup_fixture = getattr(test_function, "__setup__")
if type(setup_fixture) == type(''):
setup_method = getattr(self.test_class, setup_fixture)
else:
setup_method = setup_fixture
test_params['setup_test_params'] = True
test_params['setup_test_method'] = setup_method
# next, teardown fixture
if hasattr(test_function, "__teardown__"):
teardown_fixture = getattr(test_function, "__teardown__")
teardown_method = getattr(self.test_class, teardown_fixture)
test_params['teardown_test_method'] = teardown_method
# don't forget known issues
if hasattr(test_function, "__known_issues__"):
known_issue = getattr(test_function, "__known_issues__")
test_params['known_issue'] = known_issue
# test by default runs only once,
# unless repeated_test_count set explicitly by decorator or framework option
repeat_count = 1
# here, we check --to=repeated_test=N and --to=repeated_test.test_name=N options
# and decorate test with @repeated_test automagically if that's required
if self.config.get('repeated_test'):
repeated_test_option = self.config['repeated_test']
re_decorate = False
if type({}) != type(repeated_test_option):
# if option was given as --to=repeated_test=N, re-decorate all tests
re_decorate = True
repeat_count = int(repeated_test_option)
elif self.current_test_method in repeated_test_option.keys():
# otherwise re-decorate only if test name matches given option
re_decorate = True
repeat_count = int(repeated_test_option[self.current_test_method])
if re_decorate:
from tiden.util import repeated_test
original_test = test_function
if hasattr(original_test, 'repeated_test_name'):
# that test was previously decorated by @repeated_test, extract original test_names
original_names = original_test.repeated_test_name
decorated_test = repeated_test(repeat_count,
test_names=original_names)(original_test.__func__)
else:
# that's a brand new decoration
decorated_test = repeated_test(repeat_count)(original_test.__func__)
# this magic required to convert decorated test function to method of a test class
from types import MethodType
setattr(self.test_class, self.current_test_method, MethodType(decorated_test, self.test_class))
test_function = getattr(self.test_class, self.current_test_method)
if hasattr(test_function, 'repeated_test_count'):
repeat_count = test_function.repeated_test_count
repeated_test_name = test_function.repeated_test_name
test_params['repeated_test_count'] = repeat_count
test_params['repeated_test_name'] = repeated_test_name
test_params['continue_on_fail'] = self.config.get('repeated_test_continue_on_fail', False)
return test_params
def _skip_tests(self):
test_plan = self.test_plan[self.test_module]
skipped_tests = sorted(test_plan.skipped_tests)
try:
for current_test in skipped_tests:
test_param = test_plan.all_tests[current_test]
self.__prepare_test_vars(**test_param)
pad_string = self.__get_pad_string(msg=self.current_test_method)
self.result.skip_testcase_no_start(self.test_class, self.current_test_name,
skip_message=test_param['skip_msg'],
skip_no_start=test_param['skip_no_start'])
self.result.update_xunit()
log_print("%s %s" % (pad_string, test_param['skip_msg']), color='yellow')
finally:
self.current_test_name = None
self.current_test_method = None
def _run_tests(self, tests_to_execute):
test_plan = self.test_plan[self.test_module]
try:
for test_cnt, current_test in enumerate(tests_to_execute, start=1):
test_param = test_plan.all_tests[current_test]
self.__prepare_test_vars(**test_param)
repeated_test_count = test_param.get('repeated_test_count', 1)
repeated_test_continue_on_fail = test_param.get('continue_on_fail')
test_with_iterations = True if repeated_test_count > 1 else False
pad_string = self.__get_pad_string()
log_print("%s started (%s from %s)" % (pad_string, test_cnt, len(tests_to_execute)), color='yellow')
for self.test_iteration in range(repeated_test_count):
if test_with_iterations:
log_print("{} started (iteration {} from {})".format(pad_string,
self.test_iteration + 1,
repeated_test_count), color='yellow')
test_status = self._run_test()
if test_with_iterations and test_status != 'pass' and not repeated_test_continue_on_fail:
self.result.update_test_name('{}_iteration_{}'.format(current_test, self.test_iteration + 1))
break
finally:
self.current_test_name = None
self.current_test_method = None
def _run_test(self):
setattr(self, '_secret_report_storage', InnerReportConfig())
test_exception = None
tb_msg = None
test_status = 'pass'
pad_string = self.__get_pad_string()
started = int(time())
known_issue = self.test_plan[self.test_module].all_tests[self.current_test_name].get('known_issue')
setattr(self.test_class, '_secret_report_storage', InnerReportConfig())
try:
self.pm.do("before_test_method",
test_module=self.test_module,
test_name=self.current_test_name,
artifacts=self.config.get('artifacts', {}))
self.result.start_testcase(self.test_class, self.current_test_name)
self.__update_config_and_save(current_method_name=self.current_test_name)
# Execute test setup method
self.__call_test_setup_teardown('setup')
# self.__print_with_format()
with Step(self, 'Execution'):
try:
call_method(self.test_class, self.current_test_method)
finally:
self.__set_child_steps_to_parent()
self.__save_logs()
log_print(f"{pad_string} passed {exec_time(started)}", color='green')
except (AssertionError, TidenException) as e:
test_status = 'fail'
test_exception = e
tb_msg = traceback.format_exc()
except Exception as e:
test_status = 'error'
test_exception = e
tb_msg = traceback.format_exc()
finally:
if test_status != 'pass':
log_print(tb_msg, color='red')
log_print("{} {} {}{}".format(pad_string,
test_status,
exec_time(started),
known_issue_str(known_issue)),
color='red')
self.result.stop_testcase(
test_status,
e=test_exception,
tb=tb_msg,
known_issue=known_issue,
run_info=self.test_class.get_run_info() if hasattr(self.test_class, 'get_run_info') else None
)
# Execute test teardown method
self.__call_test_setup_teardown('teardown')
self.pm.do('after_test_method',
test_status=test_status,
exception=test_exception,
stacktrace=tb_msg,
known_issue=known_issue,
description=getattr(self.test_class, self.current_test_method, lambda: None).__doc__,
inner_report_config=getattr(self, '_secret_report_storage'))
# Kill java process if teardown function didn't kill nodes
if not hasattr(self.test_class, 'keep_ignite_between_tests'):
kill_stalled_java(self.ssh_pool)
return test_status
@step('logs')
def __save_logs(self):
test_dir = self.config.get('rt', {}).get('remote', {}).get('test_dir')
if 'WardReport' in self.config.get('plugins', []):
report_config = self.config['plugins']['WardReport']
files_receiver_url = report_config['files_url']
upload_logs = report_config['upload_logs']
else:
return
if test_dir:
try:
for host_ip, output_lines in self.ssh_pool.exec([f"ls {test_dir}"]).items():
with Step(self, host_ip):
for line in output_lines:
file_name: str
for file_name in line.split('\n'):
if file_name and file_name.endswith('.log'):
send_file_name = f'{uuid4()}_{file_name}'
add_attachment(self, file_name, send_file_name, AttachmentType.FILE)
if upload_logs:
cmd = f'cd {test_dir}; ' \
f'curl -H "filename: {send_file_name}" ' \
f'-F "file=@{file_name};filename={file_name}" ' \
f'{files_receiver_url}/files/add'
self.ssh_pool.exec_on_host(host_ip, [cmd])
except:
log_print(f'Failed to send report. \n{format_exc()}', color='pink')
def __copy_resources_to_local_test_module_directory(self):
"""
Copy resources in test resource directory
:return:
"""
test_resource_dir = "%s/res" % self.config['rt']['test_module_dir']
if not path.exists(test_resource_dir):
mkdir(test_resource_dir)
self.config['rt']['resource_dir'] = "%s/res/%s" % (self.config['suite_dir'], self.module_short_name[5:])
for file in glob("%s/*" % self.config['rt']['resource_dir']):
if path.isfile(file):
copyfile(file, f"{test_resource_dir}/{basename(file)}")
self.config['rt']['test_resource_dir'] = unix_path(test_resource_dir)
def __create_test_module_directory(self, remote_test_module_dir, test_module_dir):
mkdir(test_module_dir)
self.ssh_pool.exec([f'mkdir -p {remote_test_module_dir}'])
@step('{method_name}')
def __call_test_setup_teardown(self, method_name):
method_to_execute = None
try:
self._call_plugin_manager(f'before_test_method_{method_name}')
all_tests = self.test_plan[self.test_module].all_tests
if all_tests[self.current_test_name].get(f'{method_name}_test_method'):
method_to_execute = all_tests[self.current_test_name].get(f'{method_name}_test_method')
self.__print_with_format(msg=str(method_to_execute.__name__))
try:
if all_tests[self.current_test_name].get(f'{method_name}_test_params'):
method_to_execute(self.test_class)
else:
method_to_execute()
except Exception as e:
log_print(f'!!! Exception in {method_name} code !!!', color='red')
log_print(traceback.format_exc())
try:
self.__save_logs()
except:
log_print(f'Failed to get logs\n{traceback.format_exc()}', color='pink')
# if exception in setup method then re-raise the exception as we should fail the test
if method_name == 'setup':
raise e
finally:
self.__set_child_steps_to_parent()
self._call_plugin_manager(f'after_test_method_{method_name}')
def __set_child_steps_to_parent(self):
exec_report: InnerReportConfig = getattr(self.test_class, '_secret_report_storage', None)
test_report: InnerReportConfig = getattr(self, '_secret_report_storage')
idx_to_add = None
for idx, test_step in enumerate(test_report.steps):
if test_step['status'] is None:
idx_to_add = idx
break
test_report.steps[idx_to_add]['children'] = exec_report.steps + test_report.steps[idx_to_add].get('children', [])
title = getattr(getattr(self.test_class, self.current_test_method), '__report_title__', None)
suites = getattr(getattr(self.test_class, self.current_test_method), '__report_suites__', None)
if title:
test_report.title = title
test_report.suites = suites
setattr(self, '_secret_report_storage', test_report)
setattr(self.test_class, '_secret_report_storage', InnerReportConfig())
def __call_module_setup_teardown(self, fixture_name):
"""
Execute test module setup/teardown fixture.
:param fixture_name: either 'setup' or 'teardown'
:return:
"""
self._call_plugin_manager('before_test_class_%s' % fixture_name)
fixture_passed = True
try:
if hasattr(self.test_class, fixture_name):
started = time()
try:
self.__print_with_format('started', current_method_name=fixture_name)
self.__update_config_and_save(current_method_name=fixture_name)
# Execute setup or teardown method
call_method(self.test_class, fixture_name)
self.__print_with_format('finished in %s sec' % (int(time() - started)),
current_method_name=fixture_name)
# except (AssertionError, TidenException) as e:
except Exception as e:
fixture_passed = False
self.__print_with_format('failed in %s sec' % (int(time() - started)),
current_method_name=fixture_name)
log_print('Exception in %s.%s.%s: %s\n%s' %
(self.test_module, self.test_class_name, fixture_name,
str(e), str(traceback.format_exc())), color='red')
finally:
self._call_plugin_manager('after_test_class_%s' % fixture_name)
return fixture_passed
def _call_plugin_manager(self, execution_point):
args = [self.test_module, self.test_class]
if self.current_test_method:
args.append(self.current_test_method)
self.pm.do(execution_point, *args)
def __update_config_and_save(self, current_method_name=None):
test_method = current_method_name if current_method_name else self.current_test_method
test_method_name = test_method.split('(')[0] if '(' in test_method else test_method
test_dir_name = test_method_name
all_tests = self.test_plan[self.test_module].all_tests
# cause of repeated_tests decorator
if all_tests.get(test_method) and all_tests[test_method].get('repeated_test_name'):
test_dir_name = '{}_{}'.format(
test_method_name,
all_tests[test_method].get('repeated_test_name')[self.test_iteration])
self.config['rt']['test_method'] = test_method_name
self.config['rt']['remote']['test_dir'] = "{}/{}/{}".format(
self.config['rt']['remote']['test_module_dir'],
self.config['rt']['test_class'],
test_dir_name
)
self.config['rt']['test_dir'] = "{}/{}/{}".format(
self.config['rt']['test_module_dir'], self.config['rt']['test_class'], test_dir_name)
try:
create_remote_dir = [
'mkdir -p %s/%s/%s' % (self.config['rt']['remote']['test_module_dir'],
self.test_class_name, str(test_dir_name)),
'ln -sfn %s %s/current_test_directory' % (self.config['rt']['remote']['test_module_dir'],
self.config['environment']['home'])
]
self.ssh_pool.exec(create_remote_dir)
except Exception:
log_print("Can't create symlink to current test", color='red')
self._save_config()
def _check_test_for_skip(self):
attribs = []
skip_test = False
skip_msg = None
skip_no_start = False
test_function = getattr(self.test_class, self.current_test_method)
if hasattr(test_function, "__attrib__"):
attribs = getattr(test_function, "__attrib__")
attribs.append(str(self.current_test_method))
# if attr is passed to runner and test is not marked with one of the attribute
# then skip it.
if 'mute' in attribs:
skip_msg = 'skipped cause test is MUTED'
known_issue = None
if hasattr(test_function, "__known_issues__"):
known_issue = getattr(test_function, "__known_issues__")
if known_issue:
skip_msg = '{} cause of {}'.format(skip_msg, known_issue)
skip_test = True
skip_no_start = True
elif self.config.get('attrib') and should_be_skipped(self.config.get('attrib'), attribs,
self.config.get('attr_match', 'any')):
skip_msg = 'skipped cause of attrib mismatch'
skip_test = True
skip_no_start = True
if hasattr(test_function, "__skipped__"):
skip_msg = 'skipped cause of %s' % test_function.__skipped_message__
skip_test = True
if hasattr(test_function, "__skip_cond__"):
skip_condition = getattr(test_function, "__skip_cond__")
conditions_met, skip_message = skip_condition(self.config)
if not conditions_met:
skip_msg = 'skipped cause of %s' % skip_message
skip_test = True
if hasattr(test_function, "__skip_conds__") and \
len(test_function.__skip_conds__) > 0:
skip_conditions = test_function.__skip_conds__
for skip_condition in skip_conditions:
conditions_met, skip_message = skip_condition(self.test_class)
if not conditions_met:
skip_msg = 'skipped cause of %s' % skip_message
skip_test = True
return skip_test, skip_msg, skip_no_start
def get_tests_results(self):
return self.result
def _save_config(self):
write_yaml_file(self.config['config_path'], self.config)
@staticmethod
def gen_tests(test_class):
"""
        Generate the names of all test methods (attributes starting with 'test_') of the given test class
:param test_class:
:return:
"""
for class_attr in dir(test_class):
if class_attr.startswith('test_'):
yield class_attr
def collect_tests0(self, test_method_names):
"""
Collect given set of tests from test module for all configurations
:param test_method_names:
:return:
"""
if not hasattr(self.test_class, '__configurations__'):
self.collect_tests1(test_method_names)
else:
cfg_options = getattr(self.test_class, '__configuration_options__').copy()
configurations = getattr(self.test_class, '__configurations__').copy()
for configuration in configurations:
# set configuration options from given configuration to Tiden config,
# so that test can check options and skip itself
set_configuration_options(cfg_options, self.config, configuration)
self.collect_tests1(test_method_names, common_test_param={
'configuration': configuration,
'cfg_options': cfg_options,
})
def collect_tests1(self, test_method_names, common_test_param={}):
"""
Collect given tests from current test module
:param test_method_names:
:param common_test_param:
:return:
"""
try:
test_plan = self.test_plan[self.test_module]
for test_method_name in test_method_names:
self.__prepare_test_vars(test_method_name, **common_test_param)
test_param = {
'test_method_name': test_method_name,
}
is_skipped, skip_msg, skip_no_start = self._check_test_for_skip()
test_param.update(self.collect_test0())
repeat_count = test_param.get('repeated_test_count', 1)
if repeat_count > 0:
if repeat_count == 1:
# don't rename tests when only one iteration requested
test_param['repeated_test_name'] = []
else:
# rare case, skip by --to=repeated_test.test_name=0
is_skipped = True
skip_msg = 'skipped due to repeated_test iterations <= 0'
skip_no_start = False
if is_skipped:
test_param.update({
'skip_msg': skip_msg,
'skip_no_start': skip_no_start,
})
test_plan.skipped_tests.append(self.current_test_name)
else:
if common_test_param:
test_param.update(common_test_param)
test_plan.tests_to_execute.append(self.current_test_name)
test_plan.all_tests[self.current_test_name] = test_param.copy()
finally:
self.current_test_method = None
self.current_test_name = None
def __print_found_test_method_to_execute(self, long_path_len, test_cnt, test_module):
method_long_name = "%s.%s.%s " % (test_module, self.test_class_name, self.current_test_name)
pad_string = method_long_name.ljust(long_path_len, '.')
log_print("%s found (%s from %s)" % (pad_string, test_cnt, len(self.total.tests_to_execute)), color='yellow')
def __print_with_format(self, msg='', current_method_name=''):
if not current_method_name:
if self.current_test_method:
current_method_name = self.current_test_method
else:
current_method_name = ''
log_print("[{}][.{}.{}] {}".format(
datetime.now().isoformat()[11:-7],
self.test_class_name,
current_method_name,
msg))
def __print_current_module_name(self):
log_print("[%s][%s]" % (
datetime.now().isoformat()[11:-7], self.test_module))
def __get_pad_string(self, msg=None):
return ("%s.%s.%s " % (
self.test_module, self.test_class_name, msg if msg else self.current_test_method)) \
.ljust(self.long_path_len, '.')
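# Illustrative only: gen_tests simply yields attribute names that start with 'test_'.
# For example, with a hypothetical throw-away suite class (e.g. in a REPL where tiden
# is importable):
#
#     class DummySuite:
#         def test_alpha(self): pass
#         def test_beta(self): pass
#         def helper(self): pass
#
#     sorted(TidenRunner.gen_tests(DummySuite))  # -> ['test_alpha', 'test_beta']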
| 1.859375 | 2 |
ludwig/data/cache/manager.py | ludwig-ai/ludw | 970 | 4337 | import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
class DatasetCache:
def __init__(self, config, checksum, cache_map, dataset_manager):
self.config = config
self.checksum = checksum
self.cache_map = cache_map
self.dataset_manager = dataset_manager
def get(self):
training_set_metadata_fp = self.cache_map[META]
if not path_exists(training_set_metadata_fp):
return None
cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)
cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None
valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None
return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set
def put(self, training_set, test_set, validation_set, training_set_metadata):
logger.info("Writing preprocessed training set cache")
training_set = self.dataset_manager.save(
self.cache_map[TRAINING],
training_set,
self.config,
training_set_metadata,
TRAINING,
)
if test_set is not None:
logger.info("Writing preprocessed test set cache")
test_set = self.dataset_manager.save(
self.cache_map[TEST],
test_set,
self.config,
training_set_metadata,
TEST,
)
if validation_set is not None:
logger.info("Writing preprocessed validation set cache")
validation_set = self.dataset_manager.save(
self.cache_map[VALIDATION],
validation_set,
self.config,
training_set_metadata,
VALIDATION,
)
logger.info("Writing train set metadata")
data_utils.save_json(self.cache_map[META], training_set_metadata)
return training_set, test_set, validation_set, training_set_metadata
def delete(self):
for fname in self.cache_map.values():
if path_exists(fname):
delete(fname)
class CacheManager:
def __init__(self, dataset_manager, cache_dir=None):
self._dataset_manager = dataset_manager
self._cache_dir = cache_dir
def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
if dataset is not None:
key = self.get_cache_key(dataset, config)
cache_map = {
META: self.get_cache_path(dataset, key, META, "json"),
TRAINING: self.get_cache_path(dataset, key, TRAINING),
TEST: self.get_cache_path(dataset, key, TEST),
VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
else:
key = self.get_cache_key(training_set, config)
cache_map = {
META: self.get_cache_path(training_set, key, META, "json"),
TRAINING: self.get_cache_path(training_set, key, TRAINING),
TEST: self.get_cache_path(test_set, key, TEST),
VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
def get_cache_key(self, dataset, config):
if not isinstance(dataset, str):
# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
return str(uuid.uuid1())
return calculate_checksum(dataset, config)
def get_cache_path(self, dataset, key, tag, ext=None):
if not isinstance(dataset, str):
dataset = None
if self._cache_dir is None and dataset is not None:
# Use the input dataset filename (minus the extension) as the cache path
stem = Path(dataset).stem
else:
# To avoid collisions across different directories, we use the unique checksum
# as the cache path
stem = alphanum(key)
ext = ext or self.data_format
cache_fname = f"{stem}.{tag}.{ext}"
return os.path.join(self.get_cache_directory(dataset), cache_fname)
def get_cache_directory(self, input_fname):
if self._cache_dir is None:
if input_fname is not None:
return os.path.dirname(input_fname)
return "."
return self._cache_dir
def can_cache(self, skip_save_processed_input):
return self._dataset_manager.can_cache(skip_save_processed_input)
@property
def data_format(self):
return self._dataset_manager.data_format
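# Usage sketch (illustrative; `SomeDatasetManager` stands in for a real dataset
# manager object exposing save(), can_cache() and data_format, and `config` /
# the DataFrames are assumed to exist):
#
#   manager = CacheManager(SomeDatasetManager(), cache_dir="/tmp/ludwig_cache")
#   cache = manager.get_dataset_cache(config, dataset="data/train.csv")
#   hit = cache.get()   # None on a cold cache, else (valid, meta, train, test, val)
#   if hit is None or not hit[0]:
#       cache.put(training_set, test_set, validation_set, training_set_metadata)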
| 2.390625 | 2 |
test_calc_base.py | kshshkim/factorioCalcPy | 1 | 4338 | import pprint
from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict
from FactorioCalcBase.recipe import Recipe
from FactorioCalcBase.calculator_base import CalculatorBase
from FactorioCalcBase.dependency_dict_common_function import dict_add_number
import time
def test_change_machine(test_obj: CalculatorBase, target_recipe, failed_dict):
recipe_obj = Recipe(recipe_name=target_recipe)
cat = recipe_obj.get_category()
available_machine_list = production_machine_category_list_dict.get(cat)
failed_dict['method_failed']['change_machine_failed'] = {}
if len(available_machine_list) > 1:
for machine in available_machine_list:
test_obj.change_machine_to_specific_block(recipe_name=target_recipe,
machine_name=machine)
if test_obj.block_obj_dict['recipe']['machine_name'] != machine:
                raise RuntimeError('MachineNotChanged')
def test_calculator_base_methods(test_obj: CalculatorBase, failed_dict: dict):
recipe_list = list(test_obj.block_obj_dict['recipe'].keys())
for recipe in recipe_list:
try:
test_change_machine(test_obj, recipe, failed_dict)
except:
dict_add_number(failed_dict['method_failed']['change_machine_failed'], recipe, 1)
def test_calculator_base(failed_dict):
mrms = [0, 0.3]
pm = [None, ["assembling-machine-2", "stone-furnace", "burner-mining-drill"]]
uk = [True, False]
am = [1, 101.5]
failed_dict['init_failed'] = {}
failed_dict['method_failed'] = {
'change_machine_failed': {
}
}
for recipe in sorted_recipe_list:
for mining_research_modifier in mrms:
for preferred_machines in pm:
for use_kovarex in uk:
for amount in am:
try:
test_obj = CalculatorBase(recipe_name=recipe, amount=amount,
preferred_machine_list=preferred_machines,
use_kovarex=use_kovarex,
mining_research_modifier=mining_research_modifier)
                        except Exception:
                            dict_add_number(failed_dict['init_failed'], key=recipe, val=1)
                            continue  # construction failed, nothing to run the method tests on
                        test_calculator_base_methods(test_obj, failed_dict)
pprint.pp(failed_dict)
return failed_dict
def run_test():
start_time = time.time()
test_calculator_base({})
print(f'finished in {time.time()-start_time}')
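# Entry-point sketch (an assumption: the module is meant to be run directly; the
# original file defines run_test() without a __main__ guard):
if __name__ == '__main__':
    run_test()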
| 2.328125 | 2 |
lib/py/src/Thrift.py | ahfeel/thrift | 3 | 4339 | # Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
class TType:
STOP = 0
VOID = 1
BOOL = 2
BYTE = 3
I08 = 3
DOUBLE = 4
I16 = 6
I32 = 8
I64 = 10
STRING = 11
UTF7 = 11
STRUCT = 12
MAP = 13
SET = 14
LIST = 15
UTF8 = 16
UTF16 = 17
class TMessageType:
CALL = 1
REPLY = 2
EXCEPTION = 3
class TProcessor:
"""Base class for procsessor, which works on two streams."""
def process(iprot, oprot):
pass
class TException(Exception):
"""Base class for all thrift exceptions."""
def __init__(self, message=None):
Exception.__init__(self, message)
self.message = message
class TApplicationException(TException):
"""Application level thrift exceptions."""
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
def __str__(self):
if self.message:
return self.message
    elif self.type == self.UNKNOWN_METHOD:
      return 'Unknown method'
    elif self.type == self.INVALID_MESSAGE_TYPE:
      return 'Invalid message type'
    elif self.type == self.WRONG_METHOD_NAME:
      return 'Wrong method name'
    elif self.type == self.BAD_SEQUENCE_ID:
      return 'Bad sequence ID'
    elif self.type == self.MISSING_RESULT:
return 'Missing result'
else:
return 'Default (unknown) TApplicationException'
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.type = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
oprot.writeStructBegin('TApplicationException')
if self.message != None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.type != None:
oprot.writeFieldBegin('type', TType.I32, 2)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
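# Usage sketch (illustrative only; the method name in the message is invented):
#
#   exc = TApplicationException(TApplicationException.UNKNOWN_METHOD,
#                               'no handler registered for "ping"')
#   str(exc)          # -> 'no handler registered for "ping"'
#   exc.write(oprot)  # serialize onto an output protocol before the EXCEPTION reply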
| 1.703125 | 2 |
engine.py | nyumaya/wake-word-benchmark | 0 | 4340 | <gh_stars>0
#
# Copyright 2018 Picovoice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from collections import namedtuple
from enum import Enum
import numpy as np
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
from engines import Porcupine
from engines import snowboydetect
from engines import AudioRecognition, FeatureExtractor
class Engines(Enum):
POCKET_SPHINX = 'PocketSphinx'
PORCUPINE = 'Porcupine'
SNOWBOY = 'Snowboy'
NYUMAYA = 'Nyumaya'
SensitivityInfo = namedtuple('SensitivityInfo', 'min, max, step')
class Engine(object):
def process(self, pcm):
raise NotImplementedError()
def release(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
@staticmethod
def frame_length(engine_type):
if engine_type is Engines.NYUMAYA:
return 1600
else:
return 512
@staticmethod
def sensitivity_info(engine_type):
if engine_type is Engines.POCKET_SPHINX:
return SensitivityInfo(-21, 15, 3)
elif engine_type is Engines.PORCUPINE:
return SensitivityInfo(0, 1, 0.1)
elif engine_type is Engines.SNOWBOY:
return SensitivityInfo(0, 1, 0.05)
elif engine_type is Engines.NYUMAYA:
return SensitivityInfo(0, 1, 0.1)
else:
raise ValueError("no sensitivity range for '%s'", engine_type.value)
@staticmethod
def create(engine, keyword, sensitivity):
if engine is Engines.POCKET_SPHINX:
return PocketSphinxEngine(keyword, sensitivity)
elif engine is Engines.PORCUPINE:
return PorcupineEngine(keyword, sensitivity)
elif engine is Engines.SNOWBOY:
return SnowboyEngine(keyword, sensitivity)
elif engine is Engines.NYUMAYA:
return NyumayaEngine(keyword, sensitivity)
else:
ValueError("cannot create engine of type '%s'", engine.value)
class PocketSphinxEngine(Engine):
def __init__(self, keyword, sensitivity):
config = Decoder.default_config()
config.set_string('-logfn', '/dev/null')
config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))
config.set_string('-keyphrase', keyword if keyword != 'snowboy' else 'snow boy')
config.set_float('-kws_threshold', 10 ** -sensitivity)
self._decoder = Decoder(config)
self._decoder.start_utt()
def process(self, pcm):
assert pcm.dtype == np.int16
self._decoder.process_raw(pcm.tobytes(), False, False)
detected = self._decoder.hyp()
if detected:
self._decoder.end_utt()
self._decoder.start_utt()
return detected
def release(self):
self._decoder.end_utt()
def __str__(self):
return 'PocketSphinx'
class PorcupineEngine(Engine):
def __init__(self, keyword, sensitivity):
self._porcupine = Porcupine(
library_path=os.path.join(self._repo_path, 'lib/linux/x86_64/libpv_porcupine.so'),
model_path=os.path.join(self._repo_path, 'lib/common/porcupine_params.pv'),
keyword_paths=[os.path.join(self._repo_path, 'resources/keyword_files/linux/%s_linux.ppn' % keyword.lower())],
sensitivities=[sensitivity])
def process(self, pcm):
assert pcm.dtype == np.int16
return self._porcupine.process(pcm) == 0
def release(self):
self._porcupine.delete()
def __str__(self):
return 'Porcupine'
@property
def _repo_path(self):
return os.path.join(os.path.dirname(__file__), 'engines/porcupine')
class SnowboyEngine(Engine):
def __init__(self, keyword, sensitivity):
keyword = keyword.lower()
if keyword == 'alexa':
model_relative_path = 'engines/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl'
else:
model_relative_path = 'engines/snowboy/resources/models/%s.umdl' % keyword.replace(' ', '_')
model_str = os.path.join(os.path.dirname(__file__), model_relative_path).encode()
resource_filename = os.path.join(os.path.dirname(__file__), 'engines/snowboy/resources/common.res').encode()
self._snowboy = snowboydetect.SnowboyDetect(resource_filename=resource_filename, model_str=model_str)
# https://github.com/Kitt-AI/snowboy#pretrained-universal-models
if keyword == 'jarvis':
self._snowboy.SetSensitivity(('%f,%f' % (sensitivity, sensitivity)).encode())
else:
self._snowboy.SetSensitivity(str(sensitivity).encode())
if keyword in {'alexa', 'computer', 'jarvis', 'view glass'}:
self._snowboy.ApplyFrontend(True)
else:
self._snowboy.ApplyFrontend(False)
def process(self, pcm):
assert pcm.dtype == np.int16
return self._snowboy.RunDetection(pcm.tobytes()) == 1
def release(self):
pass
def __str__(self):
return 'Snowboy'
class NyumayaEngine(Engine):
def __init__(self, keyword, sensitivity):
#logging.info("INIT NYUMAYA")
keyword = keyword.lower()
model_relative_path = 'engines/nyumaya_audio_recognition/models/Hotword/%s_v1.0.0.premium' % keyword
model_str = os.path.join(os.path.dirname(__file__), model_relative_path)
libpath="engines/nyumaya_audio_recognition/lib/linux_x86_64/libnyumaya_premium.so.1.0.0"
self._extractor = FeatureExtractor(libpath)
self._detector = AudioRecognition(libpath)
keywordId = self._detector.addModel(model_str,sensitivity)
def process(self, pcm):
assert pcm.dtype == np.int16
#logging.info(len(pcm))
features = self._extractor.signalToMel(pcm.tobytes(),1.0)
return self._detector.runDetection(features) == 1
def release(self):
pass
def __str__(self):
return 'Nyumaya'
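# Usage sketch (illustrative; the keyword, sensitivity and audio source below are
# hypothetical):
#
#   engine = Engine.create(Engines.PORCUPINE, 'porcupine', 0.5)
#   frame_length = Engine.frame_length(Engines.PORCUPINE)   # 512 samples per call
#   for pcm in int16_frames(frame_length):                  # hypothetical int16 frame generator
#       if engine.process(pcm):
#           print('keyword detected')
#   engine.release()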
| 2.140625 | 2 |
objO_and_ctxMgr/harakiri.py | thirschbuechler/didactic-barnacles | 0 | 4341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 22:18:58 2020
@author: https://stackoverflow.com/questions/293431/python-object-deleting-itself
@editor: thirschbuechler
this is probably overkill to alternatively exit a with-context, rather than by exception,
but hey, maybe it will be needed, or related to getting rid of the visa-handle within thvisa
# for some reason, __enter__ does not work in the with-context
"""
# NOTE: This is Python 3 code, it should work with python 2, but I haven't tested it.
import weakref #https://docs.python.org/3/library/weakref.html
class InsaneClass(object):
_alive = []
    def __new__(cls):  # __new__ receives the class (cls); __init__ later receives the created instance (self)
self = super().__new__(cls)
InsaneClass._alive.append(self)
return weakref.proxy(self)
def commit_suicide(self):
self._alive.remove(self)
def __enter__(self):
print("enter says hello")
return self
def __init__(self):
pass
def __exit__(self, exc_type, exc_value, tb):# "with" context exit: call del
print("bye")
if __name__ == '__main__': # test if called as executable, not as library
instance = InsaneClass()
instance.__enter__()
instance.commit_suicide()
#print(instance)
print(InsaneClass) # pointer
print(InsaneClass().__enter__()) # an object
print("now, something completely different!")
with InsaneClass() as i:
i.commit_suicide()
print(i) | 2.6875 | 3 |
chapter2/gestures.py | srimani-programmer/Opencv-with-Python-Blueprints-second-Edition | 39 | 4342 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module containing an algorithm for hand gesture recognition"""
import numpy as np
import cv2
from typing import Tuple
__author__ = "<NAME>"
__license__ = "GNU GPL 3.0 or later"
def recognize(img_gray):
"""Recognizes hand gesture in a single-channel depth image
This method estimates the number of extended fingers based on
a single-channel depth image showing a hand and arm region.
:param img_gray: single-channel depth image
:returns: (num_fingers, img_draw) The estimated number of
extended fingers and an annotated RGB image
"""
# segment arm region
segment = segment_arm(img_gray)
# find the hull of the segmented area, and based on that find the
# convexity defects
(contour, defects) = find_hull_defects(segment)
# detect the number of fingers depending on the contours and convexity
# defects, then draw defects that belong to fingers green, others red
img_draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)
(num_fingers, img_draw) = detect_num_fingers(contour,
defects, img_draw)
return (num_fingers, img_draw)
def segment_arm(frame: np.ndarray, abs_depth_dev: int = 14) -> np.ndarray:
"""Segments arm region
This method accepts a single-channel depth image of an arm and
hand region and extracts the segmented arm region.
It is assumed that the hand is placed in the center of the image.
:param frame: single-channel depth image
:returns: binary image (mask) of segmented arm region, where
arm=255, else=0
"""
height, width = frame.shape
    # find the central 21x21 pixel region of the depth frame
    center_half = 10  # half-width of the 21-pixel window: (21 - 1) // 2
center = frame[height // 2 - center_half:height // 2 + center_half,
width // 2 - center_half:width // 2 + center_half]
# find median depth value of center region
med_val = np.median(center)
# try this instead:
frame = np.where(abs(frame - med_val) <= abs_depth_dev,
128, 0).astype(np.uint8)
# morphological
kernel = np.ones((3, 3), np.uint8)
frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)
# connected component
small_kernel = 3
frame[height // 2 - small_kernel:height // 2 + small_kernel,
width // 2 - small_kernel:width // 2 + small_kernel] = 128
mask = np.zeros((height + 2, width + 2), np.uint8)
flood = frame.copy()
cv2.floodFill(flood, mask, (width // 2, height // 2), 255,
flags=4 | (255 << 8))
ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
return flooded
def find_hull_defects(segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Find hull defects
This method finds all defects in the hull of a segmented arm
region.
:param segment: a binary image (mask) of a segmented arm region,
where arm=255, else=0
:returns: (max_contour, defects) the largest contour in the image
and all corresponding defects
"""
contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
# find largest area contour
max_contour = max(contours, key=cv2.contourArea)
epsilon = 0.01 * cv2.arcLength(max_contour, True)
max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
# find convexity hull and defects
hull = cv2.convexHull(max_contour, returnPoints=False)
defects = cv2.convexityDefects(max_contour, hull)
return max_contour, defects
def detect_num_fingers(contour: np.ndarray, defects: np.ndarray,
img_draw: np.ndarray, thresh_deg: float = 80.0) -> Tuple[int, np.ndarray]:
"""Detects the number of extended fingers
This method determines the number of extended fingers based on a
contour and convexity defects.
It will annotate an RGB color image of the segmented arm region
with all relevant defect points and the hull.
:param contours: a list of contours
:param defects: a list of convexity defects
:param img_draw: an RGB color image to be annotated
:returns: (num_fingers, img_draw) the estimated number of extended
fingers and an annotated RGB color image
"""
# if there are no convexity defects, possibly no hull found or no
# fingers extended
if defects is None:
return [0, img_draw]
# we assume the wrist will generate two convexity defects (one on each
# side), so if there are no additional defect points, there are no
# fingers extended
if len(defects) <= 2:
return [0, img_draw]
# if there is a sufficient amount of convexity defects, we will find a
# defect point between two fingers so to get the number of fingers,
# start counting at 1
num_fingers = 1
# Defects are of shape (num_defects,1,4)
for defect in defects[:, 0, :]:
# Each defect is an array of four integers.
# First three indexes of start, end and the furthest
# points respectively
# contour is of shape (num_points,1,2) - 2 for point coordinates
start, end, far = [contour[i][0] for i in defect[:3]]
# draw the hull
cv2.line(img_draw, tuple(start), tuple(end), (0, 255, 0), 2)
# if angle is below a threshold, defect point belongs to two
# extended fingers
if angle_rad(start - far, end - far) < deg2rad(thresh_deg):
# increment number of fingers
num_fingers += 1
# draw point as green
cv2.circle(img_draw, tuple(far), 5, (0, 255, 0), -1)
else:
# draw point as red
cv2.circle(img_draw, tuple(far), 5, (0, 0, 255), -1)
# make sure we cap the number of fingers
return min(5, num_fingers), img_draw
def angle_rad(v1, v2):
"""Angle in radians between two vectors
This method returns the angle (in radians) between two array-like
vectors using the cross-product method, which is more accurate for
small angles than the dot-product-acos method.
"""
return np.arctan2(np.linalg.norm(np.cross(v1, v2)), np.dot(v1, v2))
def deg2rad(angle_deg):
"""Convert degrees to radians
    This method converts an angle in degrees e[0, 360) into radians
    e[0, 2*np.pi)
"""
return angle_deg / 180.0 * np.pi
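# Small self-contained sanity check of the two helpers above (illustrative only):
if __name__ == '__main__':
    v1 = np.array([1.0, 0.0])
    v2 = np.array([0.0, 1.0])
    # orthogonal unit vectors span 90 degrees
    assert abs(angle_rad(v1, v2) - deg2rad(90.0)) < 1e-9
    print('angle helpers OK')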
| 3.734375 | 4 |
satt/trace/logger/panic.py | jnippula/satt | 54 | 4343 | <filename>satt/trace/logger/panic.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
// Copyright (c) 2015 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'''
""" PanicLogger RAM-tracing
"""
import sys
import time
from logger import Logger
class PanicLogger(Logger):
""" Panic logger
"""
def __init__(self, control):
# Base class init call
Logger.__init__(self, control)
# Add default kernel module parameter for RAM-tracing
self._kernel_module_parameters += " trace_method=1 sideband_log_method=1"
# Add more option to command line input
self._parser.add_argument('-p', '--panic', action='store', help='Panic tracing mode: 1=Normal, 2=Hooked(default)',
required=False, default=2)
self._parser.add_argument('-s', '--sideband', action='store', help='Panic tracing mode: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-g', '--gbuffer', action='store', help='Dump trace data to gbuffer: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-u', '--userspace', action='store', help='Exclude user space: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-k', '--kernel', action='store', help='Exclude kernel: 0=Off(default), 1=On',
required=False, default=0)
self._parser.add_argument('-d', '--dump', action='store',
help='Dump kernel and kernel modules for processing: 0=Off, 1=On(default)',
required=False, default=0)
self.args = self._parser.parse_args()
self._kernel_module_parameters += " panic_tracer=" + str(self.args.panic)
self._kernel_module_parameters += " panic_sideband=" + str(self.args.sideband)
self._kernel_module_parameters += " panic_gbuffer=" + str(self.args.gbuffer)
self._kernel_module_parameters += " exclude_userspace=" + str(self.args.userspace)
self._kernel_module_parameters += " exclude_kernel=" + str(self.args.kernel)
def initialize(self):
self._debug_print("PanicLogger::initialize")
# Initialize Logger base class
Logger.initialize(self)
# Call start_tracing earlier to stop execution earlier
self.start_tracing()
def start_tracing(self):
self._debug_print("start_tracing")
trace_name, trace_path = self.get_trace_name("Enter <<trace name>> to start panic tracing? :")
if trace_name:
self.set_trace_path(trace_path, trace_name)
self.get_build_info()
# TODO Problem, there is no Sideband.bin info yet
# Quick Fix
# Start tracing, wait 100ms, Stop tracing, fetch sideband info
Logger.start_tracing(self)
time.sleep(0.2)
Logger.stop_tracing(self)
time.sleep(0.2)
Logger.get_sideband_data(self)
self.dump_kernel()
self.dump_linux_gate()
self.dump_kernel_modules()
Logger.start_tracing(self)
print ""
print "Panic tracing activated"
print "If panic happens, wait 10s and reboot device."
print ""
print "When device boot up run following command:"
print "sat-panic-fetch " + self.trace_name
sys.exit(0)
else:
print "Panic Tracer did not get started"
def stop_tracing(self):
return
def get_data(self):
return
def get_trace_data(self):
return
| 2.671875 | 3 |
xlab/cli.py | csalcedo001/xlab | 1 | 4344 | import sys
import os
from . import filesys
MAIN_USAGE_MESSAGE = """
usage: xlab command ...
Options:
positional arguments:
command
project
"""
def project(args):
if len(args) != 1:
print("error: Invalid arguments.")
exit()
if args[0] == 'init':
root = os.getcwd()
dirs = filesys.Directories()
dirs.set_root(root)
def main():
if len(sys.argv) <= 1:
print(MAIN_USAGE_MESSAGE)
exit()
command = sys.argv[1]
args = sys.argv[2:]
if command == 'project':
exe = project
else:
print("error: No command 'xlab {}'.".format(command))
exit()
exe(args) | 2.640625 | 3 |
python/paddle/optimizer/adamw.py | jzhang533/Paddle | 0 | 4345 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import Optimizer
from .adam import Adam
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from ..fluid.dygraph import base as imperative_base
from collections import Callable
import paddle
_C_ops = core.ops
__all__ = []
class AdamW(Adam):
r"""
The AdamW optimizer is implemented based on the AdamW Optimization
in paper `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_.
    it can resolve the problem of L2 regularization failure in the Adam optimizer.
.. math::
t & = t + 1
moment\_1\_out & = {\beta}_1 * moment\_1 + (1 - {\beta}_1) * grad
moemnt\_2\_out & = {\beta}_2 * moment\_2 + (1 - {\beta}_2) * grad * grad
learning\_rate & = learning\_rate *
\frac{\sqrt{1 - {\beta}_2^t}}{1 - {beta}_1^t}
param\_out & = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)
Args:
learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
It can be a float value or a LRScheduler. The default value is 0.001.
parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are a list of dicts. Note that the learning_rate in parameter groups \
represents the scale of base learning_rate. \
The default value is None in static mode, at this time all parameters will be updated.
beta1 (float|Tensor, optional): The exponential decay rate for the 1st moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.9.
beta2 (float|Tensor, optional): The exponential decay rate for the 2nd moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.999.
epsilon (float, optional): A small float value for numerical stability.
The default value is 1e-08.
weight_decay (float|Tensor, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
lr_ratio (function|None, optional): If it is not None,
the learning rate will be updated with layerwise learning rate ratio.
Otherwise, the learning rate is the original.
Default: None.
apply_decay_param_fun (function|None, optional): If it is not None,
only tensors that makes apply_decay_param_fun(Tensor.name)==True
will be updated with weight decay. It only works when we want to specify tensors.
Default: None.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
The accumulators are updated at every step. Every element of the two moving-average
is updated in both dense mode and sparse mode. If the size of parameter is very large,
            then the update may be very slow. The lazy mode only updates the elements that have
            gradient in the current mini-batch, so it will be much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different results.
The default value is False.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
**Notes**:
**Currently, AdamW doesn't support sparse parameter optimization.**
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.AdamW(learning_rate=0.1,
parameters=linear.parameters(),
beta1=beta1,
beta2=beta2,
weight_decay=0.01)
out.backward()
adam.step()
adam.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adam = paddle.optimizer.AdamW(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
'beta1': 0.8
}],
weight_decay=0.01,
beta1=0.9)
out.backward()
adam.step()
adam.clear_grad()
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
parameters=None,
weight_decay=0.01,
lr_ratio=None,
apply_decay_param_fun=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
if not 0 <= beta1 < 1:
raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
if not 0 <= beta2 < 1:
raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
if not 0 <= epsilon:
raise ValueError("Invaild value of epsilon, expect epsilon >= 0.")
coeff = weight_decay
if not isinstance(coeff, float) and \
not isinstance(coeff, framework.Variable):
raise TypeError("coeff should be float or Tensor.")
self._params_name = set()
self._apply_decay_param_fun = apply_decay_param_fun
self._coeff = coeff
self._lr_to_coeff = dict()
if lr_ratio is not None:
assert isinstance(lr_ratio, Callable)
if core.is_compiled_with_xpu() or core.is_compiled_with_npu():
raise NotImplementedError(
"'lr_ratio' is unimplemented in XPU and NPU")
self._lr_ratio = lr_ratio
super(AdamW, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
grad_clip=grad_clip,
name=name,
lazy_mode=lazy_mode,
multi_precision=multi_precision)
self._default_dict = {'coeff': coeff}
self.type = "adamw"
if core.is_compiled_with_xpu():
self.type = "adam"
# Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to achieve that.
self._auxiliary_vars = dict()
def _set_auxiliary_var(self, key, val):
self._auxiliary_vars[key] = val
def _get_auxiliary_var(self, key):
if key in self._auxiliary_vars:
return self._auxiliary_vars[key]
else:
return None
def _append_decoupled_weight_decay(self, block, param_and_grad):
"""
Add decoupled weight decay op.
parameter = parameter - parameter * coeff * lr
Args:
block: block in which variable is to be created
param_and_grad: (parameters, gradients) pairs,
the parameters need to decay.
Raises:
Exception: The type of coeff and parameter is not consistent.
"""
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
return
if isinstance(self._learning_rate, float):
learning_rate = self._learning_rate
else:
# NOTE. We add this function to the _append_optimize_op(),
# for we must make sure _create_param_lr() be called after
# optimizer._create_global_learning_rate().
learning_rate = self._create_param_lr(param_and_grad)
with block.program._optimized_guard(
[param, grad]), framework.name_scope('weight decay'):
self._params_name.add(param.name)
# If it has been calculated, the result will be reused.
# NOTE(wangxi): In dygraph mode, apply_gradient will be executed
# every step, so need clear _lr_to_coeff every step,
# we do this in _create_optimization_pass
decay_coeff = self._lr_to_coeff.get(learning_rate, None)
if decay_coeff is None:
# NOTE(wangxi): for pipeline to set device:all
with paddle.static.device_guard(None):
decay_coeff = 1.0 - learning_rate * self._coeff
self._lr_to_coeff[learning_rate] = decay_coeff
find_master = (self._multi_precision and
param.dtype == core.VarDesc.VarType.FP16)
if find_master:
master_weight = self._master_weights[param.name]
scaled_param = master_weight * decay_coeff
paddle.fluid.layers.assign(
input=scaled_param, output=master_weight)
else:
scaled_param = param * decay_coeff
paddle.fluid.layers.assign(input=scaled_param, output=param)
def _append_optimize_op(self, block, param_and_grad):
if paddle.is_compiled_with_xpu():
self._append_decoupled_weight_decay(block, param_and_grad)
return super(AdamW, self)._append_optimize_op(block, param_and_grad)
assert isinstance(block, framework.Block)
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
# Whether we should do weight decay for the parameter.
with_decay = True
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
with_decay = False
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
master_weight = (self._master_weights[param_and_grad[0].name]
if find_master else None)
lr = self._create_param_lr(param_and_grad)
# create the adamw optimize op
if framework.in_dygraph_mode():
lr_ratio_ = 1. if self._lr_ratio is None else self._lr_ratio(
param_and_grad[0])
_beta1 = self._beta1 if not isinstance(
self._beta1, Variable) else self._beta1.numpy().item(0)
_beta2 = self._beta2 if not isinstance(
self._beta2, Variable) else self._beta2.numpy().item(0)
_, _, _, _, _ = _C_ops.adamw(
param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
'lazy_mode', self._lazy_mode, 'min_row_size_to_use_multithread',
1000, 'beta1', _beta1, 'beta2', _beta2, 'coeff', self._coeff,
"lr_ratio", lr_ratio_)
return None
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"LearningRate": [lr],
"Moment1": [moment1],
"Moment2": [moment2],
"Beta1Pow": [beta1_pow_acc],
"Beta2Pow": [beta2_pow_acc],
}
# Pass found_inf to adamw, to skip update for not only param, but also momentum and beta_pow
found_inf = self._get_auxiliary_var('found_inf')
if found_inf:
inputs['SkipUpdate'] = found_inf
outputs = {
"ParamOut": [param_and_grad[0]],
"Moment1Out": [moment1],
"Moment2Out": [moment2],
"Beta1PowOut": [beta1_pow_acc],
"Beta2PowOut": [beta2_pow_acc],
}
attrs = {
"lazy_mode": self._lazy_mode,
"min_row_size_to_use_multithread": 1000,
"multi_precision": find_master,
"with_decay": with_decay,
"coeff": self._coeff,
"lr_ratio": 1.
if self._lr_ratio is None else self._lr_ratio(param_and_grad[0])
}
if isinstance(self._beta1, Variable):
inputs['Beta1Tensor'] = self._beta1
else:
attrs['beta1'] = self._beta1
if isinstance(self._beta2, Variable):
inputs['Beta2Tensor'] = self._beta2
else:
attrs['beta2'] = self._beta2
if isinstance(self._epsilon, Variable):
inputs['EpsilonTensor'] = self._epsilon
else:
attrs['epsilon'] = self._epsilon
if find_master:
inputs["MasterParam"] = master_weight
outputs["MasterParamOut"] = master_weight
adamw_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return adamw_op
def _create_optimization_pass(self, parameters_and_grads):
optimize_ops = super(
AdamW, self)._create_optimization_pass(parameters_and_grads)
# In dygraph mode, clear _lr_to_coeff after applied gradient
self._lr_to_coeff = dict()
return optimize_ops
def __str__(self):
return " ".join(["Weight Decay, params:", ",".join(self._params_name)])
def _update_param_group(self, parameters):
self._coeff = parameters.get('coeff', self._default_dict['coeff'])
parameters = parameters.get('params')
return parameters
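# Additional usage sketch (illustrative; the name-based selector is an example
# policy, not something mandated by the optimizer):
#
#   import paddle
#   linear = paddle.nn.Linear(10, 10)
#   no_decay = lambda pname: 'bias' in pname or 'norm' in pname
#   opt = paddle.optimizer.AdamW(
#       learning_rate=3e-4,
#       parameters=linear.parameters(),
#       weight_decay=0.01,
#       apply_decay_param_fun=lambda pname: not no_decay(pname))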
| 2.09375 | 2 |
tests/resources/test_interactions.py | VinLau/BAR_API | 1 | 4346 | <reponame>VinLau/BAR_API<gh_stars>1-10
from api import app
from unittest import TestCase
class TestIntegrations(TestCase):
maxDiff = None
def setUp(self):
self.app_client = app.test_client()
def test_get_itrns(self):
"""
This function test retrieving protein interactions for various species' genes.
"""
# Valid request rice
response = self.app_client.get("/interactions/rice/LOC_Os01g52560")
expected = {
"wasSuccessful": True,
"data": [
{
"protein_1": "LOC_Os01g01080",
"protein_2": "LOC_Os01g52560",
"total_hits": 1,
"Num_species": 1,
"Quality": 1,
"pcc": 0.65,
},
{
"protein_1": "LOC_Os01g52560",
"protein_2": "LOC_Os01g73310",
"total_hits": 1,
"Num_species": 1,
"Quality": 1,
"pcc": -0.116,
},
],
}
self.assertEqual(response.json, expected)
# Invalid species
response = self.app_client.get("/interactions/poplar/abc")
expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
self.assertEqual(response.json, expected)
# Invalid gene id
response = self.app_client.get("/interactions/rice/abc")
expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
self.assertEqual(response.json, expected)
# Gene does not exist
response = self.app_client.get("/interactions/rice/LOC_Os01g52565")
expected = {
"wasSuccessful": False,
"error": "There are no data found for the given gene",
}
self.assertEqual(response.json, expected)
| 2.859375 | 3 |
src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py | 16kozlowskim/Group-20-SE | 0 | 4347 | # install BeautifulSoup4 before running
#
# prints out historical data in csv format:
#
# [date, open, high, low, close, volume]
#
import re, csv, sys, urllib2
from bs4 import BeautifulSoup
# If the start date and end date are the same, only one value will be returned;
# otherwise multiple values are returned, which can be used for calculations
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
def get_historical_data(ticker, interval, start_date, end_date):
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
url_builder = []
url_builder.append('https://stooq.com/q/d/?s=')
url_builder.append(ticker)
url_builder.append('&c=0&d1=')
url_builder.append(start_date)
url_builder.append('&d2=')
url_builder.append(end_date)
url_builder.append('&i=')
url_builder.append(interval)
url = ''.join(url_builder)
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
link = soup.findAll('a', href=re.compile('^q/d/l/'))
link = re.search('"(.*)"', str(link))
try:
link = link.group(1)
except AttributeError:
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow('')
exit()
link = link.replace('amp;', '')
arr = []
arr.append('https://stooq.com/')
arr.append(link)
link = ''.join(arr)
response = urllib2.urlopen(link)
cr = csv.reader(response)
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerows(cr)
def main():
args = sys.argv
get_historical_data(args[1], args[2], args[3], args[4])
if __name__ == '__main__':
main()
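# Example invocation (illustrative; ticker symbol and date range are arbitrary):
#   python historicalScrape.py aapl.us d 20170101 20171231
# which downloads daily rows of [date, open, high, low, close, volume] into the
# CSV path configured above.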
| 3.140625 | 3 |
client/client.py | odontomachus/hotbox | 0 | 4348 | <reponame>odontomachus/hotbox<filename>client/client.py<gh_stars>0
import sys
import io
from collections import defaultdict
import struct
from time import sleep
import queue
import threading
import serial
from serial import SerialException
RUN_LABELS = ('Time left', 'Temp 1', 'Temp 2', 'Off Goal', 'Temp Change', 'Duty cycle (/30)', 'Heating', 'Cycle', 'Total time', 'Goal temp')
MSG_RUN_STATUS = 1
MSG_CONFIG = 2
MSG_STATUS = 3
MSG_LENGTHS = {MSG_RUN_STATUS: 20, MSG_CONFIG: 9, MSG_STATUS: 5}
STATE_START = 1
STATE_ACTIVE = 2
STATE_READY = 3
STATE_BOOT = 4
STATE_INIT = 5
STATE_DISCONNECTED = 127 # can't connect to serial
HB_CYCLE = 30
class RunStatus:
__slots__ = ('countdown', 't1', 't2', 'dg', 'dt', 'part', 'state', 'cycle', 'time', 'goal')
def __init__(self, message):
(self.t1,
self.t2,
self.countdown,
self.part,
self.cycle,
self.state,
self.dg,
self.dt,
self.time,
self.goal,
) = struct.unpack('=BBLBB?bbLB', message)
def __str__(self):
return "\t".join(
map(str,
(self.countdown,
self.t1,
self.t2,
self.dg,
self.dt,
self.part,
"On" if self.state else "Off",
self.state,
self.cycle,
self.time,
self.goal,
)
))
class OvenConfig:
__slots__ = ('temp', 'time')
def __init__(self, message):
(self.time,
self.temp) = struct.unpack('=LB', message)
class OvenStatus:
__slots__ = ('status',)
def __init__(self, message):
self.status = message[0]
def check_connection(fun):
def inner(self, *args, **kwargs):
if self.state == "connected":
try:
fun(self, *args, **kwargs)
except SerialException:
self.disconnect()
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
return inner
class Client(threading.Thread):
""" Client class for hotbox serial connection """
parsers = {
MSG_STATUS: OvenStatus,
MSG_RUN_STATUS: RunStatus,
MSG_CONFIG: OvenConfig,
}
def __init__(self):
super().__init__()
self.state = 'disconnected'
self.msg_queue = {MSG_STATUS: queue.Queue(),
MSG_CONFIG: queue.Queue(),
MSG_RUN_STATUS: queue.Queue(),
}
def connect(self, port):
try:
self.conn = serial.Serial(port, 9600, timeout=0.05)
# empty buffer
while len(self.conn.read(1)) > 0:
pass
self.state = 'connected'
sleep(0.01)
self.oven_query_config()
sleep(0.2)
self.oven_status()
except SerialException:
self.disconnect()
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
finally:
self.start_message = 0
def run(self):
self.running = 1
parsed_length = 0
mtype = 0
msg_length = 0
while self.running:
# Don't do anything if disconnected
if (self.state == 'disconnected'):
sleep(0.1)
continue
try:
c = self.conn.read(1)
except SerialException:
self.disconnect()
continue
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
continue
# wait for message
if not c:
continue
# this is the message type byte
if parsed_length == 3:
parsed_length += 1
if c[0] == 0:
continue
mtype = c[0]
msg_length = MSG_LENGTHS[mtype]
buffer = bytes()
continue
if parsed_length < 3:
# Abort if not a null byte
if c[0]:
parsed_length = 0
continue
# otherwise increment parsed length
parsed_length += 1
continue
# in any other case this is a data byte
parsed_length += 1
buffer += c
if parsed_length == msg_length:
data = self.parsers[mtype](buffer)
self.msg_queue[mtype].put(data)
parsed_length = 0
mtype = 0
msg_length = 0
@check_connection
def oven_configure(self, ctime, temp):
self.conn.write(b'c'+struct.pack('=LB', ctime, temp))
@check_connection
def oven_start(self):
self.conn.write(b's')
@check_connection
def oven_stop(self):
self.conn.write(b't')
@check_connection
def oven_status(self):
self.conn.write(b'r')
@check_connection
def oven_query_config(self):
self.conn.write(b'q')
def disconnect(self):
self.state = 'disconnected'
self.msg_queue[MSG_STATUS].put(OvenStatus((STATE_DISCONNECTED,)))
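# Usage sketch (illustrative; the serial port name is machine-specific and the
# configuration values are guesses):
#
#   client = Client()
#   client.start()                      # background thread that parses serial frames
#   client.connect('/dev/ttyACM0')
#   client.oven_configure(ctime=3600, temp=60)
#   client.oven_start()
#   status = client.msg_queue[MSG_RUN_STATUS].get(timeout=HB_CYCLE)
#   print(status)
#   client.oven_stop()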
| 2.46875 | 2 |
test/functional/abc-sync-chain.py | ComputerCraftr/devault | 35 | 4349 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that a node receiving many (potentially out of order) blocks exits
initial block download (IBD; this occurs once it has passed minimumchainwork)
and continues to sync without seizing.
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (CBlockHeader,
network_thread_start,
P2PInterface,
msg_block,
msg_headers)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, p2p_port
NUM_IBD_BLOCKS = 50
class BaseNode(P2PInterface):
def send_header(self, block):
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
self.send_message(msg)
def send_block(self, block):
self.send_message(msg_block(block))
class SyncChainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
# Setting minimumchainwork makes sure we test IBD as well as post-IBD
self.extra_args = [
["-minimumchainwork={:#x}".format(202 + 2 * NUM_IBD_BLOCKS)]]
def run_test(self):
node0conn = BaseNode()
node0conn.peer_connect('127.0.0.1', p2p_port(0))
network_thread_start()
node0conn.wait_for_verack()
node0 = self.nodes[0]
tip = int(node0.getbestblockhash(), 16)
height = node0.getblockcount() + 1
time = node0.getblock(node0.getbestblockhash())['time'] + 1
blocks = []
for i in range(NUM_IBD_BLOCKS * 2):
block = create_block(tip, create_coinbase(height), time)
block.solve()
blocks.append(block)
tip = block.sha256
height += 1
time += 1
# Headers need to be sent in-order
for b in blocks:
node0conn.send_header(b)
# Send blocks in some random order
for b in random.sample(blocks, len(blocks)):
node0conn.send_block(b)
# The node should eventually, completely sync without getting stuck
def node_synced():
return node0.getbestblockhash() == blocks[-1].hash
wait_until(node_synced)
if __name__ == '__main__':
SyncChainTest().main()
| 2.359375 | 2 |
djangostagram/posts/models.py | hongsemy/InstagramWithDjango | 0 | 4350 | <reponame>hongsemy/InstagramWithDjango
from django.db import models
from djangostagram.users import models as user_model
# Create your models here.
# Abstract base model inherited by the other models below.
# A commonly used pattern.
class TimeStamedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save, unlike created_at
# An option that makes this model to not show up directly on the database
class Meta:
abstract = True
class Posts(TimeStamedModel):
author = models.ForeignKey(
user_model.User,
null = True,
on_delete = models.CASCADE,
related_name = "post_author"
)
caption = models.TextField(blank=True)
image = models.ImageField(blank=True)
image_likes = models.ManyToManyField(user_model.User, related_name='post_image_likes')
class Comments(TimeStamedModel):
author = models.ForeignKey(
user_model.User,
null = True,
on_delete = models.CASCADE,
related_name = "comment_author"
)
posts = models.ForeignKey(
Posts,
null = True,
on_delete = models.CASCADE,
related_name = "comment_post"
)
contents = models.TextField(blank=True)
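# Django shell sketch (illustrative; assumes an existing `user` instance of
# djangostagram.users.models.User):
#
#   post = Posts.objects.create(author=user, caption='first post')
#   post.image_likes.add(user)
#   Comments.objects.create(author=user, posts=post, contents='nice shot!')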
| 2.8125 | 3 |
guillotina/contrib/workflows/events.py | rboixaderg/guillotina | 173 | 4351 | from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
"""An object has been moved"""
def __init__(self, object, workflow, action, comments):
ObjectEvent.__init__(self, object)
self.object = object
self.workflow = workflow
self.action = action
self.comments = comments
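# Dispatch sketch (illustrative; assumes this runs inside a coroutine and that the
# surrounding application notifies events the usual guillotina way):
#
#   await notify(WorkflowChangedEvent(obj, workflow, 'publish', 'approved by editor'))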
| 1.921875 | 2 |
data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py | lrwb-aou/curation | 16 | 4352 | """
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression
):
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
table_namer=None):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "Suppress COVID EHR vaccine concepts."
super().__init__(
issue_numbers=['DC1692'],
description=desc,
affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
affected_tables=CDM_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
table_namer=table_namer)
def create_suppression_lookup_table(self, client):
concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_id=self.sandbox_dataset_id,
concept_suppression_lookup_table=self.
concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables
Method to run validation on cleaning rules that will be updating the values.
For example:
if your class updates all the datetime fields you should be implementing the
validation that checks if the date time values that needs to be updated no
longer exists in the table.
if your class deletes a subset of rows in the tables you should be implementing
the validation that checks if the count of final final row counts + deleted rows
should equals to initial row counts of the affected tables.
Raises RunTimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to setup validation on cleaning rules that will be updating or deleting the values.
For example:
if your class updates all the datetime fields you should be implementing the
logic to get the initial list of values which adhere to a condition we are looking for.
if your class deletes a subset of rows in the tables you should be implementing
the logic to get the row counts of the tables prior to applying cleaning rule
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
| 1.875 | 2 |
pydbhub/httphub.py | sum3105/pydbhub | 18 | 4353 | import pydbhub
from typing import Any, Dict, List, Tuple
from json.decoder import JSONDecodeError
import requests
import io
def send_request_json(query_url: str, data: Dict[str, Any]) -> Tuple[List[Any], str]:
"""
send_request_json sends a request to DBHub.io, formatting the returned result as JSON
Parameters
----------
query_url : str
url of the API endpoint
data : Dict[str, Any]
data to be processed to the server.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.json(), None
except JSONDecodeError as e:
return None, e.args[0]
except TypeError as e:
return None, e.args[0]
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
return None, str(cause.args[0])
def send_request(query_url: str, data: Dict[str, Any]) -> Tuple[List[bytes], str]:
"""
send_request sends a request to DBHub.io.
Parameters
---- query_url : str
url of the API endpoint
data : Dict[str, Any]
data to be processed to the server.------
Returns
-------
List[bytes]
database file is returned as a list of bytes
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.content, None
except requests.exceptions.HTTPError as e:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
return None, str(cause.args[0])
def send_upload(query_url: str, data: Dict[str, Any], db_bytes: io.BufferedReader) -> Tuple[List[Any], str]:
"""
send_upload uploads a database to DBHub.io.
Parameters
----------
query_url : str
url of the API endpoint.
data : Dict[str, Any]
data to be processed to the server.
db_bytes : io.BufferedReader
A buffered binary stream of the database file.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
files = {"file": db_bytes}
response = requests.post(query_url, data=data, headers=headers, files=files)
response.raise_for_status()
if response.status_code != 201:
# The returned status code indicates something went wrong
try:
return response.json(), str(response.status_code)
except JSONDecodeError:
return None, str(response.status_code)
return response.json(), None
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
return None, str(cause.args[0])
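

# --- Added usage sketch (not part of the original pydbhub module) ---
# A minimal example of how send_request_json might be called. The endpoint path
# and the "apikey" form field below are illustrative assumptions, not values
# taken from this module.
if __name__ == '__main__':
    example_url = 'https://api.dbhub.io/v1/databases'  # assumed endpoint, for illustration only
    example_data = {'apikey': 'YOUR_API_KEY'}          # assumed form field, for illustration only
    rows, err = send_request_json(example_url, example_data)
    if err is not None:
        print(f'request failed: {err}')
    else:
        print(rows)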
| 3.3125 | 3 |
test_calcscore.py | BrandonLeiran/bracket-scoring | 0 | 4354 | <reponame>BrandonLeiran/bracket-scoring
import pytest
from calcscore import round_score
# you'll be picking what teams make it to the next round
# - so picking 32, then 16, then 8, 4, 2, 1...i.e. round 1-6 winners
# teams will have a name & a seed
# seed doesn't change, so maybe make that not passed around w/ results
def test_round_score_invalid_round():
with pytest.raises(ValueError, match=r".*range*"):
round_score(0)
with pytest.raises(ValueError, match=r".*range*"):
round_score(7)
def test_round_score_invalid_winner():
VALID_ROUND = 1
all_teams = []
round_winners = []
picked_winners = ["picked team"]
with pytest.raises(ValueError, match=r".*invalid winner"):
round_score(VALID_ROUND, all_teams, round_winners, picked_winners)
# score = round_score(0)
# assert score == 0
| 3.03125 | 3 |
tests/test_get.py | bgyori/pyobo | 0 | 4355 | <filename>tests/test_get.py
import unittest
from operator import attrgetter
import obonet
from pyobo import SynonymTypeDef, get
from pyobo.struct import Reference
from pyobo.struct.struct import (
iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties,
iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs,
)
from tests.constants import TEST_CHEBI_OBO_PATH
class TestParseObonet(unittest.TestCase):
""""""
@classmethod
def setUpClass(cls) -> None:
cls.graph = obonet.read_obo(TEST_CHEBI_OBO_PATH)
def test_get_graph_typedefs(self):
"""Test getting type definitions from an :mod:`obonet` graph."""
pairs = {
(typedef.prefix, typedef.identifier)
for typedef in iterate_graph_typedefs(self.graph)
}
self.assertIn(('chebi', 'has_part'), pairs)
def test_get_graph_synonym_typedefs(self):
"""Test getting synonym type definitions from an :mod:`obonet` graph."""
synonym_typedefs = sorted(iterate_graph_synonym_typedefs(self.graph), key=attrgetter('id'))
self.assertEqual(
sorted([
SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'),
SynonymTypeDef(id='BRAND_NAME', name='BRAND NAME'),
SynonymTypeDef(id='INN', name='INN'),
], key=attrgetter('id')),
synonym_typedefs,
)
def test_get_node_synonyms(self):
"""Test getting synonyms from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
synonyms = list(iterate_node_synonyms(data))
self.assertEqual(1, len(synonyms))
synonym = synonyms[0]
self.assertEqual('N,N,N-tributylbutan-1-aminium fluoride', synonym.name, msg='name parsing failed')
self.assertEqual('EXACT', synonym.specificity, msg='specificity parsing failed')
# TODO implement
# self.assertEqual(SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'), synonym.type)
def test_get_node_properties(self):
"""Test getting properties from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
properties = list(iterate_node_properties(data))
t_prop = 'http://purl.obolibrary.org/obo/chebi/monoisotopicmass'
self.assertIn(t_prop, {prop for prop, value in properties})
self.assertEqual(1, sum(prop == t_prop for prop, value in properties))
value = [value for prop, value in properties if prop == t_prop][0]
self.assertEqual('261.28318', value)
def test_get_node_parents(self):
"""Test getting parents from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
parents = list(iterate_node_parents(data))
self.assertEqual(2, len(parents))
self.assertEqual({'24060', '51992'}, {
parent.identifier
for parent in parents
})
self.assertEqual({'chebi'}, {
parent.prefix
for parent in parents
})
def test_get_node_xrefs(self):
"""Test getting parents from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
xrefs = list(iterate_node_xrefs(data))
self.assertEqual(7, len(xrefs))
# NOTE the prefixes are remapped by PyOBO
self.assertEqual({'pubmed', 'cas', 'beilstein', 'reaxys'}, {
xref.prefix
for xref in xrefs
})
self.assertEqual(
{
('reaxys', '3570522'), ('beilstein', '3570522'), ('cas', '429-41-4'),
('pubmed', '21142041'), ('pubmed', '21517057'), ('pubmed', '22229781'), ('pubmed', '15074950'),
},
{(xref.prefix, xref.identifier) for xref in xrefs}
)
def test_get_node_relations(self):
"""Test getting relations from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:17051']
relations = list(iterate_node_relationships(data, 'chebi'))
self.assertEqual(1, len(relations))
typedef, target = relations[0]
self.assertIsNotNone(target)
self.assertIsInstance(target, Reference)
self.assertEqual('chebi', target.prefix)
self.assertEqual('29228', target.identifier)
self.assertIsNotNone(typedef)
self.assertIsInstance(typedef, Reference)
self.assertEqual('chebi', typedef.prefix)
self.assertEqual('is_conjugate_base_of', typedef.identifier)
class TestGet(unittest.TestCase):
"""Test generation of OBO objects."""
def test_get_obo(self):
"""Test getting an OBO document."""
obo = get('chebi', url=TEST_CHEBI_OBO_PATH, local=True)
terms = list(obo)
self.assertEqual(18, len(terms))
| 2.34375 | 2 |
src/commons.py | ymontilla/WebScrapingCatastro | 0 | 4356 | <reponame>ymontilla/WebScrapingCatastro
# -*- coding: utf-8 -*-
# +
## Common utilities shared between Places and OSM.
# +
import csv
import ast
import codecs
from math import cos, asin, sqrt
# +
def read_csv_with_encoding(filename, delimiter="|", encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as fp:
reader = csv.reader(fp, delimiter=delimiter)
csvFile = list(reader)
return pd.DataFrame(csvFile[1:], columns=csvFile[0])
def read_json_with_encoding(filename, encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as a:
l = a.read()
json_file = ast.literal_eval(l)
return json_file
# -
import pandas as pd
def distance(lat1, lon1, lat2, lon2):
"""
    The result of the distance measurement is in kilometers (haversine formula).
"""
p = 0.017453292519943295 #Pi/180
a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a))
def build_center_point(df):
lat = df["latitude"].mean()
lon = df["longitude"].mean()
return pd.DataFrame({'fid': [777], 'latitude': [lat], 'longitude': [lon]})
"""
The process is very heavy and it is not possible to run the analysis over all of Bogotá's data; the number of
records is too large to fit in memory. The correct usage is to filter the data before doing the cross join.
"""
def compute_cross_distances(location_df, interest_points_df=None):
condition_latitude = ~location_df["latitude"].isna()
condition_longitude = ~location_df["longitude"].isna()
location_df_complete = location_df.loc[condition_latitude & condition_longitude]
results = []
for i in location_df_complete.index:
for j in interest_points_df.index:
results.append([
location_df_complete.loc[i, "fid"],
distance(location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
float(interest_points_df.loc[j, "lat"]), float(interest_points_df.loc[j, "lon"])),
location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
interest_points_df.loc[j, "lat"],
interest_points_df.loc[j, "lon"],
interest_points_df.loc[j, "amenity"],
interest_points_df.loc[j, "name"]
])
final = list(zip(*results))
return pd.DataFrame({'fid': final[0], 'distance': final[1], 'p_lat': final[2],
'p_lon': final[3], 'i_lat': final[4], 'i_lon': final[5],
'amenity': final[6], 'name': final[7]})
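

# --- Added usage sketch (not part of the original module) ---
# Illustrates distance() and build_center_point(); the two coordinates below are
# assumptions chosen for demonstration only (points roughly in central Bogotá).
if __name__ == '__main__':
    print(distance(4.60971, -74.08175, 4.62762, -74.06390))  # distance in kilometers
    demo = pd.DataFrame({'fid': [1, 2],
                         'latitude': [4.60971, 4.62762],
                         'longitude': [-74.08175, -74.06390]})
    print(build_center_point(demo))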
| 3.40625 | 3 |
GamesGetter.py | JamescMcE/BasketBet | 0 | 4357 | #This script Imports Game Data from ESPN, and Odds from the ODDS-API, and then imports them into a MySQL table, example in workbench here https://puu.sh/HOKCj/ce199eec8e.png
import mysql.connector
import requests
import json
import datetime
import time
#Connection to the MYSQL Server.
mydb = mysql.connector.connect(
host="",
user="",
password="",
database="basketbet_data"
)
mycursor = mydb.cursor()
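
# --- Added sketch (not part of the original script): the basketbet_data.all_games
# schema that the INSERT/UPDATE statements below assume. Column names are taken
# from the INSERT further down; the SQL types here are assumptions for illustration.
ALL_GAMES_SCHEMA_SKETCH = """
CREATE TABLE IF NOT EXISTS all_games (
    Game_ID VARCHAR(32) PRIMARY KEY,
    Game_Name VARCHAR(255),
    Home_Team VARCHAR(64),
    Home_Odds FLOAT,
    Home_Score VARCHAR(8),
    Away_Team VARCHAR(64),
    Away_Odds FLOAT,
    Away_Score VARCHAR(8),
    Game_Date VARCHAR(16),
    Game_Time VARCHAR(16),
    Game_Period VARCHAR(8),
    Game_Status VARCHAR(32)
);
"""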
#Games List.
allGames=[]
#Gets the game Data from ESPN API given the link.
def newGetter(gameDay):
    # JSON response for the requested game day.
response = requests.get(gameDay).json()
gameData = response["events"]
#Loop through to collect GameDay data.
a=0
while a < len(gameData):
game = str(gameData[a]['name'])
game_ID = str(gameData[a]['id'])
game_Date = str(gameData[a]['date'][:-7])
game_Time = str(gameData[a]['date'][11:-1])
game_Period = str(gameData[a]['status']['period'])
game_Status = str(gameData[a]['status']['type']['description'])
home_Score = str(gameData[a]['competitions'][0]['competitors'][0]['score'])
away_Score = str(gameData[a]['competitions'][0]['competitors'][1]['score'])
#Quick fix to change Clippers Name from LA Clippers to Los Angeles Clippers.
if str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName']) == 'LA Clippers':
home_Team = 'Los Angeles Clippers'
else:
home_Team = str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName'])
if str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName']) == 'LA Clippers':
away_Team = 'Los Angeles Clippers'
else:
away_Team = str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName'])
#Appends the Game Data to the list.
allGames.append((game_ID, game, home_Team, home_Score, away_Team, away_Score, game_Date, game_Time, game_Period, game_Status))
a+=1
#Gets the Odds from the ODDS-API.
def oddsGetter():
#Parameters for Odds Api.
parameters = {
"sport" : "basketball_nba",
"region" : "uk",
"mkt" : "h2h",
"apiKey" : "",
}
#JSON Response.
response = requests.get("https://api.the-odds-api.com/v3/odds/", params=parameters)
data = response.json()['data']
team0OddsInfo=[]
team1OddsInfo=[]
team0_odds = ''
team1_odds = ''
#Appends the odds info to a list as strings.
for game in data:
for site in game['sites']:
if site['site_key'] == "paddypower":
team0_odds = str(site['odds']['h2h'][0])
team1_odds = str(site['odds']['h2h'][1])
if team0_odds == '':
team0_odds = 0
if team1_odds == '':
team1_odds = 0
team0 = str(game['teams'][0])
team1 = str(game['teams'][1])
startTime = game['commence_time']
gameDate = str(datetime.datetime.utcfromtimestamp(startTime).strftime('%Y-%m-%d %H:%M:%S'))[:-9]
team0OddsInfo.append((team0, team0_odds, gameDate))
team1OddsInfo.append((team1, team1_odds, gameDate))
a=0
    # As both lists are the same length, loop through one and update the table rows where needed.
while a < len(team0OddsInfo):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_Date = %s'
gameDate = (str(team0OddsInfo[a][2]),)
mycursor.execute(query_string, gameDate)
matchedGames = mycursor.fetchall()
b=0
while b < len(matchedGames):
if matchedGames[b][2] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Home_Odds = %s, Away_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
elif matchedGames[b][5] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Away_Odds = %s, Home_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
b+=1
a+=1
#For the console to show when odds were updated.
mydb.commit()
time = datetime.datetime.utcnow()
print('\n' + 'ODDS UPDATE AT: ' + str(time))
print('--------------------------------')
print('--------------------------------')
print(len(team0OddsInfo), "GAME ODDS inserted.")
print('REMAINING REQUESTS:', response.headers['x-requests-remaining'])
print('USED REQUESTS:', response.headers['x-requests-used'])
print('--------------------------------')
print('--------------------------------')
# Main loop: each iteration sleeps ~300 seconds; the counter starts at 72 so odds update on the first pass and then roughly every 6 hours (72 x 5 min = 360 min), while games update every 5 minutes.
counter=72
startTime = time.time()
while True:
#Today, Yesterday and Tomorrow.
today = datetime.date.today()
yesterday = today + datetime.timedelta(days=-1)
tomorrow = today + datetime.timedelta(days=1)
#Removing the - from the dates for the URLs, then making the URLs.
todayShort = str(today).replace('-', '')
yesterdayShort = str(yesterday).replace('-', '')
tomorrowShort = str(tomorrow).replace('-', '')
yesterdayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + yesterdayShort + '-' + yesterdayShort
todayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + todayShort + '-' + todayShort
tomorrowUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + tomorrowShort + '-' + tomorrowShort
newGetter(yesterdayUrl)
newGetter(todayUrl)
newGetter(tomorrowUrl)
#Inserting or updating the table in MYSQL with the games.
c=0
updateCount=0
newGameCount=0
while c < len(allGames):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_ID = %s'
gameID = (str(allGames[c][0]),)
mycursor.execute(query_string, gameID)
if mycursor.fetchone():
updateCount+=1
query_list = [allGames[c][1], allGames[c][2], allGames[c][4], allGames[c][5], allGames[c][3], allGames[c][6], allGames[c][7], allGames[c][8], allGames[c][9], allGames[c][0]]
query_string = 'UPDATE all_games SET Game_Name = %s, Home_Team = %s, Away_Team = %s, Away_Score = %s, Home_Score = %s, Game_Date = %s, Game_Time = %s, Game_Period = %s, Game_Status = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
mydb.commit()
else:
newGameCount+=1
query_string = "INSERT INTO basketbet_data.all_games (Game_ID, Game_Name, Home_Team, Home_Odds, Home_Score, Away_Team, Away_Odds, Away_Score, Game_Date, Game_Time, Game_Period, Game_Status) VALUES (%s, %s, %s, 0, %s, %s, 0, %s, %s, %s, %s, %s)"
mycursor.execute(query_string, allGames[c])
mydb.commit()
c+=1
#Prints to console what games were updated and what new games were inserted.
print('----------------------------------------')
print(str(updateCount) + ' GAMES UPDATED, and ' + str(newGameCount) + ' NEW GAMES inserted.')
print('----------------------------------------')
allGames=[]
#Counter for the Odds script.
if counter==72:
oddsGetter()
counter=0
else:
counter+=1
print('\n')
time.sleep(300 - ((time.time() - startTime) % 300)) | 3.046875 | 3 |
neurodocker/tests/test_neurodocker.py | effigies/neurodocker | 1 | 4358 | <filename>neurodocker/tests/test_neurodocker.py
"""Tests for neurodocker.main"""
# Author: <NAME> <<EMAIL>>
from __future__ import absolute_import, unicode_literals
import sys
import pytest
from neurodocker.neurodocker import create_parser, parse_args, main
def test_generate():
args = ("generate -b ubuntu:17.04 -p apt"
" --arg FOO=BAR BAZ"
" --afni version=latest"
" --ants version=2.2.0"
" --freesurfer version=6.0.0"
" --fsl version=5.0.10"
" --user=neuro"
" --miniconda env_name=neuro conda_install=python=3.6.2"
" --user=root"
" --mrtrix3"
" --neurodebian os_codename=zesty download_server=usa-nh"
" --spm version=12 matlab_version=R2017a"
" --no-check-urls"
" --expose 1234 9000"
" --volume /var /usr/bin"
" --label FOO=BAR BAZ=CAT"
" --copy relpath/to/file.txt /tmp/file.txt"
" --add relpath/to/file2.txt /tmp/file2.txt"
" --cmd '--arg1' '--arg2'"
" --workdir /home"
" --install git"
" --user=neuro"
)
main(args.split())
with pytest.raises(SystemExit):
args = "-b ubuntu"
main(args.split())
with pytest.raises(SystemExit):
args = "-p apt"
main(args.split())
with pytest.raises(SystemExit):
main()
args = "generate -b ubuntu -p apt --ants option=value"
with pytest.raises(ValueError):
main(args.split())
def test_generate_opts(capsys):
args = "generate -b ubuntu:17.04 -p apt --no-check-urls {}"
main(args.format('--user=neuro').split())
out, _ = capsys.readouterr()
assert "USER neuro" in out
main(args.format('--add path/to/file.txt /tmp/file.txt').split())
out, _ = capsys.readouterr()
assert 'ADD ["path/to/file.txt", "/tmp/file.txt"]' in out
main(args.format('--copy path/to/file.txt /tmp/file.txt').split())
out, _ = capsys.readouterr()
assert 'COPY ["path/to/file.txt", "/tmp/file.txt"]' in out
main(args.format('--env KEY=VAL KEY2=VAL').split())
out, _ = capsys.readouterr()
assert 'ENV KEY="VAL" \\' in out
assert ' KEY2="VAL"' in out
main(args.format('--expose 1230 1231').split())
out, _ = capsys.readouterr()
assert "EXPOSE 1230 1231" in out
main(args.format('--workdir /home').split())
out, _ = capsys.readouterr()
assert "WORKDIR /home" in out
main(args.format('--install vi').split())
out, _ = capsys.readouterr()
assert "vi" in out
main(args.format('--instruction RUNecho').split())
out, _ = capsys.readouterr()
assert "RUNecho" in out
def test_generate_from_json(capsys, tmpdir):
import json
cmd = "generate -b debian:stretch -p apt --c3d version=1.0.0"
main(cmd.split())
true, _ = capsys.readouterr()
specs = {'check_urls': True,
'generation_timestamp': '2017-08-31 21:49:04',
'instructions': [['base', 'debian:stretch'],
['c3d', {'version': '1.0.0'}]],
'neurodocker_version': '0.2.0-18-g9227b17',
'pkg_manager': 'apt'}
str_specs = json.dumps(specs)
filepath = tmpdir.join("specs.json")
filepath.write(str_specs)
gen_cmd = "generate --file {}".format(filepath)
main(gen_cmd.split())
test, _ = capsys.readouterr()
# These indices chop off the header (with timestamp) and the layer that
# saves to JSON (with timestamp).
sl = slice(8, -19)
assert true.split('\n')[sl] == test.split('\n')[sl]
def test_generate_no_print(capsys):
args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--no-check-urls']
main(args)
out, _ = capsys.readouterr()
assert "FROM" in out and "RUN" in out
args.append('--no-print-df')
main(args)
out, _ = capsys.readouterr()
assert not out
def test_generate_save(tmpdir):
outfile = tmpdir.join("test.txt")
args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--mrtrix3',
'use_binaries=false', '--no-print-df', '-o', outfile.strpath,
'--no-check-urls']
main(args)
assert outfile.read(), "saved Dockerfile is empty"
assert "git clone https://github.com/MRtrix3/mrtrix3.git" in outfile.read()
| 2.296875 | 2 |
fuzzers/011-cle-ffconfig/generate.py | tmichalak/prjuray | 39 | 4359 | <filename>fuzzers/011-cle-ffconfig/generate.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''
from prims import isff, isl
from utils.segmaker import Segmaker
segmk = Segmaker("design.bits", bits_per_word=16)
def loadtop():
'''
i,prim,loc,bel
0,FDPE,SLICE_X12Y100,C5FF
1,FDPE,SLICE_X15Y100,A5FF
2,FDPE_1,SLICE_X16Y100,B5FF
3,LDCE_1,SLICE_X17Y100,BFF
'''
f = open('top.txt', 'r')
f.readline()
ret = {}
for l in f:
i, prim, loc, bel, init = l.split(",")
i = int(i)
init = int(init)
ret[loc] = (i, prim, loc, bel, init)
return ret
top = loadtop()
def vs2i(s):
return {"1'b0": 0, "1'b1": 1}[s]
print("Loading tags from design.txt")
with open("design.txt", "r") as f:
for line in f:
'''
puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
CLEM CLEM_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
CLEM CLEM_X10Y137 30 13 SLICE_X12Y137/D2FF FF_INIT 0
'''
line = line.split()
tile_type = line[0]
tile_name = line[1]
grid_x = line[2]
grid_y = line[3]
# Other code uses BEL name
# SLICE_X12Y137/D2FF
site_ff_name = line[4]
site, ff_name = site_ff_name.split('/')
ff_type = line[5]
used = int(line[6])
cel_prim = None
cel_name = None
if used:
cel_name = line[7]
cel_prim = line[8]
cinv = int(line[9])
init = vs2i(line[10])
# A B C D E F G H
which = ff_name[0]
# LUT6 vs LUT5 FF
is2 = '2' in ff_name
if used:
segmk.add_site_tag(site, "%s.ZINI" % ff_name, 1 ^ init)
'''
On name:
The primitives you listed have a control input to set the FF value to zero (clear/reset),
the other three primitives have a control input that sets the FF value to one.
Z => inversion
'''
segmk.add_site_tag(site, "%s.ZRST" % ff_name,
cel_prim in ('FDRE', 'FDCE', 'LDCE'))
segmk.compile()
segmk.write()
| 1.992188 | 2 |
hmc/integrators/states/riemannian_leapfrog_state.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 1 | 4360 | from typing import Callable
import numpy as np
from hmc.integrators.states.leapfrog_state import LeapfrogState
from hmc.integrators.fields import riemannian
from hmc.linalg import solve_psd
class RiemannianLeapfrogState(LeapfrogState):
"""The Riemannian leapfrog state uses the Fisher information matrix to provide
a position-dependent Riemannian metric. As such, computing the gradients of
the Hamiltonian requires higher derivatives of the metric, which vanish in
the Euclidean case.
"""
def __init__(self,
position: np.ndarray,
momentum: np.ndarray):
super().__init__(position, momentum)
self._jac_metric: np.ndarray
self._grad_logdet_metric: np.ndarray
@property
def requires_update(self) -> bool:
o = self.log_posterior is None or \
self.grad_log_posterior is None or \
self.metric is None or \
self.inv_metric is None or \
self.jac_metric is None or \
self.grad_logdet_metric is None
return o
@property
def jac_metric(self):
return self._jac_metric
@jac_metric.setter
def jac_metric(self, value):
self._jac_metric = value
@jac_metric.deleter
def jac_metric(self):
del self._jac_metric
@property
def grad_logdet_metric(self):
return self._grad_logdet_metric
@grad_logdet_metric.setter
def grad_logdet_metric(self, value):
self._grad_logdet_metric = value
@grad_logdet_metric.deleter
def grad_logdet_metric(self):
del self._grad_logdet_metric
def update(self, auxiliaries: Callable):
num_dims = len(self.position)
log_posterior, grad_log_posterior, metric, jac_metric = auxiliaries(self.position)
jac_metric = np.swapaxes(jac_metric, 0, -1)
inv_metric, sqrtm_metric = solve_psd(metric, return_chol=True)
grad_logdet_metric = riemannian.grad_logdet(inv_metric, jac_metric, num_dims)
self.log_posterior = log_posterior
self.grad_log_posterior = grad_log_posterior
self.metric = metric
self.sqrtm_metric = sqrtm_metric
self.inv_metric = inv_metric
self.jac_metric = jac_metric
self.grad_logdet_metric = grad_logdet_metric
self.velocity = riemannian.velocity(inv_metric, self.momentum)
self.force = riemannian.force(self.velocity, grad_log_posterior, jac_metric, grad_logdet_metric)
def clear(self):
super().clear()
del self.jac_metric
del self.grad_logdet_metric
del self.metric
del self.inv_metric
del self.logdet_metric
del self.sqrtm_metric
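

# --- Added usage sketch (not part of the original module) ---
# Shows the call pattern expected by update(): `auxiliaries(position)` must return
# (log_posterior, grad_log_posterior, metric, jac_metric). The standard-normal toy
# target and the constant metric below are assumptions for demonstration only.
if __name__ == '__main__':
    def _toy_auxiliaries(q: np.ndarray):
        num_dims = len(q)
        metric = np.eye(num_dims)                               # constant metric
        jac_metric = np.zeros((num_dims, num_dims, num_dims))   # its derivatives vanish
        log_posterior = -0.5 * q @ q                            # standard normal target
        grad_log_posterior = -q
        return log_posterior, grad_log_posterior, metric, jac_metric

    state = RiemannianLeapfrogState(np.zeros(2), np.ones(2))
    state.update(_toy_auxiliaries)
    print(state.velocity, state.force)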
| 2.578125 | 3 |
MultirangerTest.py | StuartLiam/DroneNavigationOnboard | 0 | 4361 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2017 Bitcraze AB
#
# Crazyflie Python Library
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Example script that allows a user to "push" the Crazyflie 2.0 around
using your hands while it's hovering.
This examples uses the Flow and Multi-ranger decks to measure distances
in all directions and tries to keep away from anything that comes closer
than 0.2m by setting a velocity in the opposite direction.
The demo is ended by either pressing Ctrl-C or by holding your hand above the
Crazyflie.
For the example to run the following hardware is needed:
* Crazyflie 2.0
* Crazyradio PA
* Flow deck
* Multiranger deck
"""
import logging
import sys
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.positioning.motion_commander import MotionCommander
from cflib.utils.multiranger import Multiranger
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.patches as patches
URI = 'radio://0/80/2M'
if len(sys.argv) > 1:
URI = sys.argv[1]
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
def is_close(range):
MIN_DISTANCE = 0.2 # m
if range is None:
return False
else:
return range < MIN_DISTANCE
if __name__ == '__main__':
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.init_drivers(enable_debug_driver=False)
rangeArray = []
cf = Crazyflie(rw_cache='./cache')
with SyncCrazyflie(URI, cf=cf) as scf:
with MotionCommander(scf) as motion_commander:
with Multiranger(scf) as multiranger:
motion_commander.start_turn_left(90)
rangeArray.append(multiranger.front)
time.sleep(0.05)
plt.plot(rangeArray) | 2.1875 | 2 |
employees/choices.py | sauli6692/barbershop | 0 | 4362 | from django.utils.translation import ugettext_lazy as _
USER_TYPE_STAFF = 'STAFF'
USER_TYPE_ADMIN = 'ADMIN'
USER_TYPE_BARBER = 'BARBER'
USER_TYPE_CHOICES = (
(USER_TYPE_STAFF, _('Dev')),
(USER_TYPE_ADMIN, _('Admin')),
(USER_TYPE_BARBER, _('Barber')),
) | 1.734375 | 2 |
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py | google-ar/chromium | 2,151 | 4363 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
class HTML5TestPage(page_module.Page):
def __init__(self, url, page_set):
super(HTML5TestPage, self).__init__(url=url, page_set=page_set,
shared_page_state_class=ChromeProxySharedPageState)
class HTML5TestStorySet(story.StorySet):
""" Chrome proxy test page for traffic over https. """
def __init__(self):
super(HTML5TestStorySet, self).__init__()
urls_list = [
'http://html5test.com/',
]
for url in urls_list:
self.AddStory(HTML5TestPage(url, self))
| 2.53125 | 3 |
lessons/sqlite_example/database.py | eliranM98/python_course | 6 | 4364 | <reponame>eliranM98/python_course
"""
In this example we want to create a user credentials database with:
- user_id & password columns
- a logger showing connection logs, the DB version, and errors during fetching & executing
"""
import sqlite3
from lessons.sqlite_example.log import create as create_logger
class Commands:
create_users_table = '''
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id text,
password text
);
'''
add_user = 'INSERT INTO users (user_id, password) VALUES (\'{}\', \'{}\');'
get_users = 'SELECT user_id, password FROM users;'
get_user_by_user_id = 'SELECT user_id, password FROM users WHERE user_id = \'{}\';'
    get_user_by_id = 'SELECT user_id, password FROM users WHERE id = \'{}\';'
get_last_user = 'SELECT user_id, password FROM users ORDER BY ID DESC LIMIT 1'
drop_table = 'DROP TABLE IF EXISTS {};'
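
# --- Added usage sketch (not part of the original module) ---
# The entries in Commands are plain str.format() templates; the __main__ block
# further down uses them like this:
#
#   database.execute(Commands.add_user.format('cs0008', '123123a'))
#   rows = database.fetch(Commands.get_user_by_user_id.format('cs0008'))
#
# (Interpolating values straight into SQL is what this module does; parameterized
# queries would normally be preferable to avoid SQL injection.)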
class DataBase:
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
"""
def __init__(self, db_file, log, commands=None):
""" database connection """
try:
self.log = log
self.log.info('connecting to database')
self.connection = sqlite3.connect(db_file)
self.cursor = self.connection.cursor()
self.log.info('connection success')
self.log.info('sqlite3 version {}'.format(sqlite3.version))
if commands is None:
commands = Commands
self.command = commands
except Exception as e:
self.log.exception(e)
raise Exception(e)
def execute(self, command, *args, **kwargs):
try:
return self.cursor.execute(command)
except Exception as e:
self.log.exception(e)
def fetch(self, command=None, *args, **kw):
if command is not None:
self.execute(command)
try:
return self.cursor.fetchall()
except Exception as e:
self.log.exception(e)
def export_from_table_to_file(self, table, file_name, titles, permission='w'):
try:
self.cursor.execute("select * from {}".format(table))
table_list = self.cursor.fetchall()
with open(file_name, permission) as f:
f.write(','.join(titles) + '\n')
for i in table_list:
s = []
for a in i:
s.append(str(a))
f.write(','.join(s) + '\n')
except Exception as e:
self.log.exception(e)
def fetch_log(self, *args, **kw):
rows = self.fetch(*args, **kw)
if rows is not None:
for r in rows:
self.log.info(r)
return rows
class DataBaseExtention(DataBase):
# def get_user_credentials(self, user=None, id=None):
# users = self.fetch(self.command.get_users)
# if user is not None:
# for i in users:
# if user in i:
# return i
# if id is not None:
# return users[id][1:]
# return users[-1][1:]
def get_user_credentials(self, user=None, id=None):
if user is not None:
user_credentials = self.fetch(self.command.get_user_by_user_id.format(user))
elif id is not None:
user_credentials = self.fetch(self.command.get_user_by_id.format(id))
else:
user_credentials = self.fetch(self.command.get_last_user)
if len(user_credentials) > 0:
return user_credentials[0]
if "__main__" == __name__:
import os
log_file = os.path.dirname(os.path.abspath(__file__)) + '\\log.txt'
db_file = os.path.dirname(os.path.abspath(__file__)) + '\\db.db'
log = create_logger(log_file=log_file)
database = DataBaseExtention(db_file, log)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# database.execute(database.command.drop_table.format('users'))
# database.execute(database.command.create_users_table)
# database.execute(database.command.add_user.format('cs0008', '123123a'))
# database.execute(database.command.add_user.format('af0006', '123123a'))
# database.execute(database.command.add_user.format('jh0003', '123123a'))
# database.execute(database.command.add_user.format('kb0004', '123123a'))
# database.execute(database.command.add_user.format('op0001', '123123a'))
# database.execute(database.command.add_user.format('gv0001', '123123a'))
# database.execute(database.command.add_user.format('pm0001', '123123a'))
# database.execute(database.command.add_user.format('ps0001', '123123a'))
# database.execute(database.command.add_user.format('qa0000', '123123a'))
# user_credentials = database.get_user_credentials(id='14')
# database.connection.commit()
# database.connection.close()
# print(user_credentials)
# create a simple database with websites table that includes (
# url: varchar(1024),
# popularity_score: integer,
# monthly_visitations: integer
# )
# database.command.create_websites_table = '''
# CREATE TABLE IF NOT EXISTS websites (
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# url TEXT,
# popularity_score INTEGER,
# monthly_visitations INTEGER
# )
# '''
# database.command.add_website = 'INSERT INTO websites (url, popularity_score, monthly_visitations) VALUES (\'{}\', \'{}\', \'{}\');'
# database.execute(database.command.create_websites_table)
# database.execute(database.command.add_website.format('https://www.google.com', 5, 4000000000))
# database.execute(database.command.add_website.format('https://www.ynet.com', 3, 5000000))
# database.execute(database.command.add_website.format('https://www.youtube.com', 6, 1300000000))
# database.execute(database.command.add_website.format('https://www.python.org', 5, 1000000))
# database.command.get_site = 'SELECT url, popularity_score, monthly_visitations FROM websites WHERE url = \'{}\';'
# url, popularity, visitations = database.fetch(database.command.get_site.format('https://www.python.org'))[0]
#
# print(url, popularity, visitations)
database.export_from_table_to_file(
table='websites',
file_name='exported.csv',
titles=('id', 'url', 'popularity_score', 'monthly_visitations')
)
# database.connection.commit()
database.connection.close()
| 4.375 | 4 |
backend/app/projectx/routing.py | emmawoollett/projectx | 0 | 4365 | <gh_stars>0
from django.urls import re_path
from .consumers import UserWebSocketConsumer
websocket_urlpatterns = [
re_path(r"^ws/$", UserWebSocketConsumer.as_asgi()),
]
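
# --- Added usage sketch (not part of the original module) ---
# How websocket_urlpatterns is typically wired into the ASGI application with
# django-channels; the project's actual asgi.py is an assumption here.
#
#   from channels.auth import AuthMiddlewareStack
#   from channels.routing import ProtocolTypeRouter, URLRouter
#
#   application = ProtocolTypeRouter({
#       "websocket": AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
#   })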
| 1.625 | 2 |
aldryn_search/cms_apps.py | lab360-ch/aldryn-search | 11 | 4366 | <reponame>lab360-ch/aldryn-search<filename>aldryn_search/cms_apps.py<gh_stars>10-100
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from .conf import settings
class AldrynSearchApphook(CMSApp):
name = _("aldryn search")
def get_urls(self, *args, **kwargs):
return ['aldryn_search.urls']
if settings.ALDRYN_SEARCH_REGISTER_APPHOOK:
apphook_pool.register(AldrynSearchApphook)
| 1.703125 | 2 |
BizPy/openpyxl/20200513/horizontal_chart.py | t2y/python-study | 18 | 4367 | <filename>BizPy/openpyxl/20200513/horizontal_chart.py
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference
wb = Workbook()
ws = wb.active
df = pd.read_csv('population.csv')
ws.append(df.columns.tolist())
for row in df.values:
ws.append(list(row))
row_length = 1 + len(df.values)
values = Reference(ws, min_col=2, max_col=2, min_row=1, max_row=row_length)
categories = Reference(ws, min_col=1, min_row=2, max_row=row_length)
chart = BarChart()
chart.type = 'bar'
chart.style = 11
chart.shape = 4
chart.title = '都道府県別の人口'
chart.x_axis.title = '都道府県'
chart.y_axis.title = '人口'
chart.add_data(values, titles_from_data=True)
chart.set_categories(categories)
ws.add_chart(chart, 'A9')
wb.save('population_horizontal.xlsx')
| 2.953125 | 3 |
changes/api/serializer/models/logsource.py | alex/changes | 1 | 4368 | <filename>changes/api/serializer/models/logsource.py<gh_stars>1-10
from changes.api.serializer import Serializer, register
from changes.models.log import LogSource
@register(LogSource)
class LogSourceSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'job': {
'id': instance.job_id.hex,
},
'name': instance.name,
'step': instance.step,
'dateCreated': instance.date_created,
}
| 1.8125 | 2 |
examples/prostate/data_preparation/utils/nrrd_to_nifti.py | IsaacYangSLA/NVFlare | 0 | 4369 | <filename>examples/prostate/data_preparation/utils/nrrd_to_nifti.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
import nrrd
import numpy as np
parser = argparse.ArgumentParser("Convert nrrd label to nifti with reference image file for affine")
parser.add_argument("--input_path", help="Input nrrd path", type=str)
parser.add_argument("--reference_path", help="Reference image path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()
img = nib.load(args.reference_path)
img_affine = img.affine
nrrd_data = nrrd.read(args.input_path)
data = np.flip(nrrd_data[0], axis=1)
nft_img = nib.Nifti1Image(data, img_affine)
nib.save(nft_img, args.output_path)
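
# --- Added usage note (not part of the original script) ---
# Example invocation; the file names below are placeholders:
#
#   python nrrd_to_nifti.py --input_path label.nrrd \
#       --reference_path image.nii.gz --output_path label.nii.gz
#
# Note that, as coded above, the label volume is flipped along axis 1 before it
# is written with the reference image's affine.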
| 2.203125 | 2 |
setup.py | jszakmeister/rst2ctags | 23 | 4370 | <reponame>jszakmeister/rst2ctags
from setuptools import setup
import io
import os
import re
version_re = re.compile(r'^__version__ = "([^"]*)"$')
# Find the version number.
with open('rst2ctags.py', 'r') as f:
for line in f:
line = line.rstrip()
m = version_re.match(line)
if m:
version = m.group(1)
break
else:
raise RuntimeError("Couldn't find version string in rst2ctags.py")
# Load the description.
readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
with io.open(readme_path, encoding='utf-8') as f:
long_description = f.read()
setup(
name='rst2ctags',
description='Generates ctags-compatible output for the sections of a '
'reStructuredText document.',
long_description=long_description,
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jszakmeister/rst2ctags',
version=version,
py_modules=['rst2ctags'],
zip_safe=True,
entry_points={
'console_scripts': [
'rst2ctags = rst2ctags:cli_main',
],
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Text Processing',
'Topic :: Text Processing :: Indexing',
'Topic :: Utilities',
]
)
| 2.078125 | 2 |
py-ws/hardshare/cli.py | rerobots/hardshare | 8 | 4371 | #!/usr/bin/env python
# Copyright (C) 2018 rerobots, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line interface
"""
import argparse
import json
import logging
import logging.handlers
import os
import os.path
import subprocess
import sys
import uuid
import yaml
from aiohttp.client_exceptions import ClientConnectorError as ConnectionError
from .core import WorkspaceInstance
from .mgmt import get_local_config, add_key, add_ssh_path, list_local_keys
from .mgmt import find_wd, modify_local, rm_wd
from .api import HSAPIClient
from .err import Error as HSError
from .addons import camera_main, stop_cameras
from .addons import add_cmdsh, rm_cmdsh, add_vnc, rm_vnc, add_mistyproxy, rm_mistyproxy
def get_config_with_index(id_prefix=None):
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return None, None, 1
if len(config['wdeployments']) == 0:
print(('ERROR: no workspace deployment in local configuration.'))
return config, None, 1
if isinstance(id_prefix, list):
if len(id_prefix) == 0:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = [0]
else:
indices = []
for idp in id_prefix:
index = find_wd(config, idp)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
indices.append(index)
index = indices
elif id_prefix:
index = find_wd(config, id_prefix)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
else:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = 0
return config, index, 0
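
# --- Added usage sketch (not part of the original module) ---
# get_config_with_index() is how the subcommands below resolve a workspace
# deployment; a typical call (the id prefix is a placeholder) looks like:
#
#   config, index, rc = get_config_with_index('b3e0')
#   if rc == 0:
#       wdeployment_id = config['wdeployments'][index]['id']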
def main(argv=None):
pkglogger = logging.getLogger('hardshare')
pkglogger.setLevel(logging.WARNING)
loghandler = logging.handlers.WatchedFileHandler(filename='hardshare_client.log', mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
if argv is None:
argv = sys.argv[1:]
argparser = argparse.ArgumentParser(description=('Command-line interface'
' for the hardshare client'), add_help=False)
argparser.add_argument('-h', '--help', dest='print_help',
action='store_true', default=False,
help='print this help message and exit')
argparser.add_argument('-V', '--version', action='store_true', default=False,
help='print version of hardshare (this) package.',
dest='print_version')
argparser.add_argument('-v', '--verbose', action='store_true', default=False,
help='print verbose messages about actions by the hardshare client',
dest='verbose')
argparser.add_argument('--format', metavar='FORMAT',
default=None, type=str,
help=('special output formatting (default is no special formatting); '
'options: YAML , JSON'),
dest='output_format')
subparsers = argparser.add_subparsers(dest='command')
subparsers.add_parser('version', help='print version number and exit.')
help_parser = subparsers.add_parser('help', help='print this help message and exit')
help_parser.add_argument('help_target_command', metavar='COMMAND', type=str, nargs='?')
config_commanddesc = 'manage local and remote configuration'
config_parser = subparsers.add_parser('config',
description=config_commanddesc,
help=config_commanddesc)
config_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment for configuration changes'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
config_parser.add_argument('-c', '--create', action='store_true', default=False,
dest='create_config',
help='if no local configuration is found, then create one')
config_parser.add_argument('--add-terminate-prog', metavar='PATH',
dest='add_terminate_prog', default=None,
help='add program to list of commands to execute')
config_parser.add_argument('--rm-terminate-prog', metavar='PATH',
dest='rm_terminate_prog', default=None,
help=('remove program from list of commands to execute; '
'for example, '
'copy-and-paste value shown in `hardshare config -l` here'))
config_parser.add_argument('--add-key', metavar='FILE',
dest='new_api_token',
help='add new account key')
config_parser.add_argument('--add-ssh-path', metavar='PATH',
dest='new_ssh_path',
help='add path to SSH key pair (does NOT copy the key)')
config_parser.add_argument('--add-raw-device', metavar='PATH', type=str,
dest='raw_device_path', default=None,
help='add device file to present in container')
config_parser.add_argument('--cprovider', metavar='CPROVIDER', type=str,
dest='cprovider', default=None,
help='select a container provider: docker, podman, proxy')
config_parser.add_argument('--assign-image', metavar='IMG', type=str,
dest='cprovider_img', default=None,
help='assign image for cprovider to use (advanced option)')
config_parser.add_argument('--rm-raw-device', metavar='PATH', type=str,
dest='remove_raw_device_path', default=None,
help='remove device previously marked for inclusion in container')
config_parser.add_argument('--add-init-inside', metavar='CMD', type=str,
dest='add_init_inside', default=None,
help='add command to be executed inside container')
config_parser.add_argument('--rm-init-inside', action='store_true', default=False,
dest='rm_init_inside',
help='remove (empty) list of commands for inside initialization')
config_parser.add_argument('-p', '--prune', action='store_true', default=False,
dest='prune_err_keys',
help=('delete files in local key directory that'
' are not valid; to get list of'
' files with errors, try `--list`'))
config_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_config',
help='list configuration')
config_parser.add_argument('--local', action='store_true', default=False,
dest='only_local_config',
help='only show local configuration data')
config_parser.add_argument('--include-dissolved', action='store_true', default=False,
dest='include_dissolved',
help='include configuration data of dissolved workspace deployments')
config_parser.add_argument('--declare', metavar='ID',
dest='declared_wdeployment_id', default=None,
help=('declare that workspace deployment is'
' hosted here. (this only works if it'
' has been previously registered under'
' the same user account.)'))
rules_commanddesc = 'modify access rules (also known as capabilities or permissions)'
rules_parser = subparsers.add_parser('rules',
description=rules_commanddesc,
help=rules_commanddesc)
rules_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
rules_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_rules',
help='list all rules')
rules_parser.add_argument('--permit-me', action='store_true', default=False,
dest='add_rule_permit_me',
help='permit instantiations by you (the owner)')
rules_parser.add_argument('--drop-all', action='store_true', default=False,
dest='drop_all_rules',
help=('remove all access rules; '
'note that access is denied by default, '
'including to you (the owner)'))
rules_parser.add_argument('--permit-all', action='store_true', default=False,
dest='add_rule_permit_all',
help='permit instantiations by anyone')
register_commanddesc = 'register new workspace deployment'
register_parser = subparsers.add_parser('register',
description=register_commanddesc,
help=register_commanddesc)
register_parser.add_argument('--permit-more', action='store_false', default=True,
dest='register_at_most_one',
help=('permit registration of more than 1 wdeployment; '
'default is to fail if local configuration already '
'has wdeployment declared'))
check_commanddesc = 'check registration of this workspace deployment'
check_parser = subparsers.add_parser('check',
description=check_commanddesc,
help=check_commanddesc)
check_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to check'
' (can be unique prefix)'))
dissolve_commanddesc = ('dissolve this workspace deployment, making it'
' unavailable for any future use'
' (THIS CANNOT BE UNDONE)')
dissolve_parser = subparsers.add_parser('dissolve',
description=dissolve_commanddesc,
help=dissolve_commanddesc)
dissolve_parser.add_argument('wdid', metavar='ID', nargs='?', default=None,
help='id of workspace deployment to dissolve')
status_commanddesc = 'get status of local instances and daemon'
status_parser = subparsers.add_parser('status',
description=status_commanddesc,
help=status_commanddesc)
status_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
advertise_commanddesc = 'advertise availability, accept new instances'
advertise_parser = subparsers.add_parser('ad',
description=advertise_commanddesc,
help=advertise_commanddesc)
advertise_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to advertise'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
advertise_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
attach_camera_commanddesc = 'attach camera stream to workspace deployments'
attach_camera_parser = subparsers.add_parser('attach-camera',
description=attach_camera_commanddesc,
help=attach_camera_commanddesc)
attach_camera_parser.add_argument('camera', default=0,
type=int,
help=('on Linux, 0 typically implies /dev/video0; '
'if you only have one camera, then try 0'))
attach_camera_parser.add_argument('id_prefix', metavar='ID', nargs='*', default=None,
help=('id of workspace deployment on which to attach'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
attach_camera_parser.add_argument('--width-height', metavar='W,H', type=str,
dest='attach_camera_res', default=None,
help=('width and height of captured images; '
'default depends on the supporting drivers'))
attach_camera_parser.add_argument('--crop', metavar='CROPCONFIG', type=str,
dest='attach_camera_crop_config', default=None,
help=('image crop configuration; '
'default: all wdeployments get full images'))
attach_camera_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
stop_cameras_commanddesc = 'stop camera streams previously started by attach-camera'
stop_cameras_parser = subparsers.add_parser('stop-cameras',
description=stop_cameras_commanddesc,
help=stop_cameras_commanddesc)
stop_cameras_parser.add_argument('-a', '--all', action='store_true', default=False,
help=('stop all attached cameras associated with this '
'user account, whether or not started on this host'),
dest='all_cameras')
addon_cmdsh_commanddesc = 'manage add-on cmdsh for your workspace deployments'
addon_cmdsh_parser = subparsers.add_parser('addon-cmdsh',
description=addon_cmdsh_commanddesc,
help=addon_cmdsh_commanddesc)
addon_cmdsh_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_cmdsh_parser.add_argument('--add', action='store_true', default=False,
help='add add-on cmdsh to enable terminal access via WebSockets',
dest='add_addon_cmdsh')
addon_cmdsh_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on cmdsh',
dest='rm_addon_cmdsh')
addon_vnc_commanddesc = 'manage add-on vnc for your workspace deployments'
addon_vnc_parser = subparsers.add_parser('addon-vnc',
description=addon_vnc_commanddesc,
help=addon_vnc_commanddesc)
addon_vnc_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_vnc_parser.add_argument('--add', action='store_true', default=False,
help='add add-on vnc to enable VNC via rerobots.net',
dest='add_addon_vnc')
addon_vnc_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on vnc',
dest='rm_addon_vnc')
addon_mistyproxy_commanddesc = 'manage add-on mistyproxy for your workspace deployments'
addon_mistyproxy_parser = subparsers.add_parser('addon-mistyproxy',
description=addon_mistyproxy_commanddesc,
help=addon_mistyproxy_commanddesc)
addon_mistyproxy_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_mistyproxy_parser.add_argument('--add', action='store_true', default=False,
help='add add-on mistyproxy to allow HTTP proxy to Misty robots',
dest='add_addon_mistyproxy')
addon_mistyproxy_parser.add_argument('--ip', metavar='ADDRESS', default=None,
help='IP address of the Misty robot',
dest='targetaddr')
addon_mistyproxy_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on mistyproxy',
dest='rm_addon_mistyproxy')
terminate_commanddesc = 'mark as unavailable; optionally wait for current instance to finish'
terminate_parser = subparsers.add_parser('stop-ad',
description=terminate_commanddesc,
help=terminate_commanddesc)
terminate_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
terminate_parser.add_argument('-f', '--force', action='store_true', default=False,
help=('if there is an active instance, then'
' stop it without waiting'),
dest='force_terminate')
help_message_purge = ('if the server indicates that an instance is active,'
' but there is not one or it is otherwise in a'
' non-recoverable state, then mark it remotely as'
' terminated and attempt local clean-up; this'
' command is a last resort. First, try `hardshare'
' terminate` without --purge.')
terminate_parser.add_argument('--purge', action='store_true', default=False,
help=help_message_purge,
dest='purge_supposed_instance')
argv_parsed = argparser.parse_args(argv)
if argv_parsed.print_version or argv_parsed.command == 'version':
from . import __version__ as hardshare_pkg_version
print(hardshare_pkg_version)
return 0
elif argv_parsed.command is None or argv_parsed.command == 'help':
if hasattr(argv_parsed, 'help_target_command') and argv_parsed.help_target_command is not None:
if argv_parsed.help_target_command == 'config':
config_parser.print_help()
elif argv_parsed.help_target_command == 'rules':
rules_parser.print_help()
elif argv_parsed.help_target_command == 'register':
register_parser.print_help()
elif argv_parsed.help_target_command == 'check':
check_parser.print_help()
elif argv_parsed.help_target_command == 'dissolve':
dissolve_parser.print_help()
elif argv_parsed.help_target_command == 'status':
status_parser.print_help()
elif argv_parsed.help_target_command == 'attach-camera':
attach_camera_parser.print_help()
elif argv_parsed.help_target_command == 'stop-cameras':
stop_cameras_parser.print_help()
elif argv_parsed.help_target_command == 'addon-cmdsh':
addon_cmdsh_parser.print_help()
elif argv_parsed.help_target_command == 'addon-vnc':
addon_vnc_parser.print_help()
elif argv_parsed.help_target_command == 'addon-mistyproxy':
addon_mistyproxy_parser.print_help()
elif argv_parsed.help_target_command == 'ad':
advertise_parser.print_help()
elif argv_parsed.help_target_command == 'stop-ad':
terminate_parser.print_help()
else:
argparser.print_help()
else:
argparser.print_help()
return 0
if argv_parsed.verbose:
pkglogger.setLevel(logging.DEBUG)
if argv_parsed.output_format is not None:
output_format = argv_parsed.output_format.lower()
if output_format not in ['yaml', 'json']:
print('output format unrecognized: {}'.format(argv_parsed.output_format))
return 1
else:
output_format = None
try:
ac = HSAPIClient()
except:
ac = None
if argv_parsed.command == 'status':
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return 1
if argv_parsed.id_prefix is None:
if len(config['wdeployments']) == 0:
findings = [WorkspaceInstance.inspect_instance()]
else:
findings = []
for wd in config['wdeployments']:
findings.append(WorkspaceInstance.inspect_instance(wdeployment=wd))
else:
findings = []
for m in find_wd(config, argv_parsed.id_prefix, one_or_none=False):
findings.append(WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][m]))
if output_format == 'json':
print(json.dumps(findings))
else: # output_format == 'yaml'
print(yaml.dump(findings, default_flow_style=False))
elif argv_parsed.command == 'attach-camera':
config, indices, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployments = [config['wdeployments'][jj]['id'] for jj in indices]
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
if argv_parsed.attach_camera_res:
width, height = [int(x) for x in argv_parsed.attach_camera_res.split(',')]
if width < 1 or height < 1:
print('Width, height must be positive')
return 1
else:
width, height = None, None
if argv_parsed.attach_camera_crop_config:
crop = json.loads(argv_parsed.attach_camera_crop_config)
else:
crop = None
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
try:
camera_main(wdeployments, tok=tok, dev=argv_parsed.camera, width=width, height=height, crop=crop)
except ConnectionError:
if not argv_parsed.become_daemon:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'stop-cameras':
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
stop_cameras(tok, allcam=argv_parsed.all_cameras)
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'addon-cmdsh':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_cmdsh:
add_cmdsh(wdeployment_id, tok)
elif argv_parsed.rm_addon_cmdsh:
rm_cmdsh(wdeployment_id, tok)
else:
print('Use `hardshare addon-cmdsh` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-cmdsh')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-vnc':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_vnc:
add_vnc(wdeployment_id, tok)
elif argv_parsed.rm_addon_vnc:
rm_vnc(wdeployment_id, tok)
else:
print('Use `hardshare addon-vnc` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-vnc')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-mistyproxy':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_mistyproxy:
if argv_parsed.targetaddr is None:
print('--ip is required with --add')
return 1
add_mistyproxy(wdeployment_id, tok, argv_parsed.targetaddr)
elif argv_parsed.rm_addon_mistyproxy:
rm_mistyproxy(wdeployment_id, tok)
else:
print('Use `hardshare addon-mistyproxy` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-mistyproxy')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'ad':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if 'ssh_key' not in config or config['ssh_key'] is None:
print('WARNING: local configuration does not declare SSH key.\n'
'Instances with connection type sshtun cannot launch.')
pkglogger.removeHandler(loghandler)
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
else:
pkglogger.addHandler(logging.StreamHandler())
logfname = 'hardshare_client.{}.log'.format(config['wdeployments'][index]['id'])
loghandler = logging.FileHandler(filename=logfname, mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
return ac.run_sync(config['wdeployments'][index]['id'])
elif argv_parsed.command == 'stop-ad':
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if argv_parsed.purge_supposed_instance:
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--purge not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
findings = WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][index])
if 'container' in findings:
try:
subprocess.check_call([cprovider, 'rm', '-f',
findings['container']['name']],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except:
print('failed to stop container `{}`'.format(findings['container']['name']))
return 1
return 0
else:
print('failed to detect local instance')
return 1
else:
if ac is None:
print('cannot terminate without valid API client')
return 1
try:
ac.terminate(config['wdeployments'][index]['id'])
except FileNotFoundError:
print('ERROR: cannot reach daemon. Does it exist? (Try `hardshare status`)')
return 1
return 0
elif argv_parsed.command == 'register':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
try:
print(ac.register_new(at_most_one=argv_parsed.register_at_most_one))
except HSError as err:
print('ERROR: {}'.format(err))
return 1
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'rules':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
if argv_parsed.id_prefix is None:
wdid = None
else:
try:
wdid = str(uuid.UUID(argv_parsed.id_prefix))
except:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
print('The given ID does not appear to be valid.')
return 1
wdid = config['wdeployments'][index]['id']
if argv_parsed.list_rules:
try:
res = ac.get_access_rules(wdid)
except Exception as err:
print('{}'.format(err))
return 1
if 'err' in res:
if res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
res['comments'] = [
'Access is denied unless a rule explicitly permits it.',
]
if output_format == 'json':
print(json.dumps(res))
            else: # yaml output (also the default when no output format is specified)
print(yaml.dump(res, default_flow_style=False))
elif argv_parsed.drop_all_rules or argv_parsed.add_rule_permit_me:
try:
if argv_parsed.drop_all_rules:
ac.drop_access_rules(wdid)
elif argv_parsed.add_rule_permit_me:
ac.add_access_rule(wdid)
except Exception as err:
print('{}'.format(err))
return 1
elif argv_parsed.add_rule_permit_all:
ui_input = None
while ui_input not in ('y', 'yes'):
print('Do you want to permit access by anyone? [y/N] ', end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
ac.add_access_rule(wdid, to_user='*')
except Exception as err:
print('{}'.format(err))
return 1
else:
print('Use `hardshare rules` with a switch. For example, `hardshare rules -l`')
print('or to get a help message, enter\n\n hardshare help rules')
return 1
elif argv_parsed.command == 'check':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
res = ac.check_registration(argv_parsed.id_prefix)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
else:
print('summary of workspace deployment {}'.format(res['id']))
print('\tcreated: {}'.format(res['date_created']))
print('\torigin (address) of registration: {}'.format(res['origin']))
if 'date_dissolved' in res:
print('\tdissolved: {}'.format(res['date_dissolved']))
elif argv_parsed.command == 'dissolve':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
wdid = str(uuid.UUID(argv_parsed.wdid))
except:
print('The given ID does not appear to be valid.')
return 1
ui_input = None
while ui_input not in ('y', 'yes'):
print(('Do you want to dissolve {}? This action cannot be undone. '
'[y/N] ').format(wdid), end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
res = ac.dissolve_registration(wdid)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
# Remove from local configuration, if present
rm_wd(get_local_config(), wdid, save=True)
elif argv_parsed.command == 'config':
if argv_parsed.list_config:
try:
config = get_local_config(create_if_empty=argv_parsed.create_config,
collect_errors=True)
except:
print('error loading configuration data.'
' does it exist? is it broken?')
return 1
if not argv_parsed.only_local_config:
# Try to get remote config, given possibly new local config
try:
assert ac is not None
remote_config = ac.get_remote_config(include_dissolved=argv_parsed.include_dissolved)
except HSError as err:
print('Error: {}'.format(err))
return 1
except:
print('Error occurred while contacting rerobots servers')
print('Try config -l --local to only get local information')
return 1
config = {
'local': config,
'remote': remote_config,
}
if 'local' in config:
ref = config['local']['wdeployments']
else:
ref = config['wdeployments']
for jj, wdeployment in enumerate(ref):
ref[jj]['url'] = 'https://rerobots.net/workspace/{}'.format(wdeployment['id'])
if output_format == 'json':
print(json.dumps(config))
elif output_format == 'yaml':
print(yaml.dump(config, default_flow_style=False))
else:
if 'local' not in config:
config = {
'local': config,
'remote': None,
}
print('workspace deployments defined in local configuration:')
if len(config['local']['wdeployments']) == 0:
print('\t(none)')
else:
for wdeployment in config['local']['wdeployments']:
print('{}\n\turl: {}\n\towner: {}\n\tcprovider: {}\n\tcargs: {}'.format(
wdeployment['id'],
wdeployment['url'],
wdeployment['owner'],
wdeployment['cprovider'],
wdeployment['cargs'],
))
if wdeployment['cprovider'] in ['docker', 'podman']:
print('\timg: {}'.format(wdeployment['image']))
if wdeployment['terminate']:
print('\tterminate:')
for terminate_p in wdeployment['terminate']:
print('\t\t{}'.format(terminate_p))
print('\nfound keys:')
if len(config['local']['keys']) == 0:
print('\t(none)')
else:
print('\t' + '\n\t'.join(config['local']['keys']))
if 'err_keys' in config['local'] and len(config['local']['err_keys']) > 0:
print('found possible keys with errors:')
for err_key_path, err in config['local']['err_keys'].items():
print('\t {}: {}'.format(err, err_key_path))
if config['remote']:
if 'err' in config['remote']:
print('Error occurred while contacting remote server.')
if config['remote']['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(config['remote']['err'])
return 1
if len(config['remote']['deployments']) == 0:
print('\nno registered workspace deployments with this user account')
else:
print('\nregistered workspace deployments with this user account:')
for wd in config['remote']['deployments']:
print('{}'.format(wd['id']))
print('\tcreated: {}'.format(wd['date_created']))
if wd['desc'] is not None:
print('\tdesc: {}'.format(wd['desc']))
print('\torigin (address) of registration: {}'
.format(wd['origin']))
if wd['dissolved']:
print('\tdissolved: {}'.format(wd['dissolved']))
elif argv_parsed.prune_err_keys:
_, errored_keys = list_local_keys(collect_errors=True)
for err_key_path, err in errored_keys.items():
print('deleting {}...'.format(err_key_path))
os.unlink(err_key_path)
elif argv_parsed.new_api_token:
try:
add_key(argv_parsed.new_api_token)
except:
print('failed to add key')
return 1
elif argv_parsed.new_ssh_path:
try:
add_ssh_path(argv_parsed.new_ssh_path)
except:
print('ERROR: {} or {} does not exist or '
'has the wrong permissions.'.format(
argv_parsed.new_ssh_path,
argv_parsed.new_ssh_path + '.pub'
))
return 1
elif argv_parsed.create_config:
get_local_config(create_if_empty=True)
elif argv_parsed.declared_wdeployment_id is not None:
assert ac is not None
ac.declare_existing(argv_parsed.declared_wdeployment_id)
ac.sync_config()
elif argv_parsed.raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-raw-device not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if not os.path.exists(argv_parsed.raw_device_path):
print('ERROR: given device file does not exist')
return 1
carg = '--device={D}:{D}'.format(D=argv_parsed.raw_device_path)
config['wdeployments'][index]['cargs'].append(carg)
modify_local(config)
elif argv_parsed.remove_raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
carg = '--device={D}:{D}'.format(D=argv_parsed.remove_raw_device_path)
config['wdeployments'][index]['cargs'].remove(carg)
modify_local(config)
elif argv_parsed.add_init_inside is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'].append(argv_parsed.add_init_inside)
modify_local(config)
elif argv_parsed.rm_init_inside:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--rm-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'] = []
modify_local(config)
elif argv_parsed.cprovider is not None:
selected_cprovider = argv_parsed.cprovider.lower()
if selected_cprovider not in ['docker', 'podman', 'proxy']:
print('ERROR: cprovider must be one of the following: docker, podman, proxy')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['cprovider'] = selected_cprovider
if selected_cprovider == 'proxy':
config['wdeployments'][index]['image'] = None
else: # selected_cprovider \in {docker, podman}
if config['wdeployments'][index]['image'] is None:
config['wdeployments'][index]['image'] = 'rerobots/hs-generic'
modify_local(config)
elif argv_parsed.cprovider_img is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider not in ['docker', 'podman', 'proxy']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if cprovider == 'podman':
cp_images = subprocess.run([cprovider, 'image', 'exists', argv_parsed.cprovider_img])
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
elif cprovider == 'docker':
cp_images = subprocess.run([cprovider, 'image', 'inspect', argv_parsed.cprovider_img],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
else: # cprovider == 'proxy'
print('ERROR: --assign-image not supported for cprovider `proxy`')
return 1
config['wdeployments'][index]['image'] = argv_parsed.cprovider_img
modify_local(config)
elif argv_parsed.add_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
normalized_path = os.path.abspath(argv_parsed.add_terminate_prog)
if not os.path.exists(normalized_path):
print('ERROR: given path does not exist')
return 1
config['wdeployments'][index]['terminate'].append(normalized_path)
modify_local(config)
elif argv_parsed.rm_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['terminate'].remove(argv_parsed.rm_terminate_prog)
modify_local(config)
else:
print('Use `hardshare config` with a switch. For example, `hardshare config -l`')
print('or to get a help message, enter\n\n hardshare help config')
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1.882813 | 2 |
Dataset/Leetcode/train/7/93.py | kkcookies99/UAST | 0 | 4372 | <reponame>kkcookies99/UAST
class Solution:
def XXX(self, x: int) -> int:
        def solve(x):
            # Split x into its digits, then rebuild the number with reversed
            # place values: the digit at index i gets weight 10**i, so the
            # most significant digit ends up least significant.
            a = list(map(int, str(x)))
            p = {}
            d = 0
            for ind, val in enumerate(a):
                p[ind] = val
            for i, v in p.items():
                d += v * (10 ** i)
            # The problem requires returning 0 on signed 32-bit overflow.
            if 2**31 - 1 >= d >= -(2**31):
                return d
            else:
                return 0
        if x >= 0:
            return solve(x)
        if x < 0:
            x = -x
            return -solve(x)
| 2.8125 | 3 |
app/realty.py | JenBanks8585/Labs_CitySpireDS | 0 | 4373 | """Realty Info"""
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
from app.walk_score import *
load_dotenv()
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
@router.get('/streamlined_rent_list')
async def streamlined_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
response = response_for_rent.json()['properties']
rental_list = []
for i in range(limit):
line = response[i]['address']['line']
city = response[i]['address']['city']
state = response[i]['address']['state']
lat = response[i]['address']['lat']
lon = response[i]['address']['lon']
photos = response[i]['photos']
address = line +" "+ city + " "+ state
walk_score = just_walk_score(address, lat, lon)
element = {'address': address,
'lat': lat,
'lon': lon,
'city':city,
'state':state,
'photos': photos,
'walk_score': walk_score}
rental_list.append(element)
return rental_list
@router.get('/for_rent_list')
async def for_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()['properties']
@router.get('/for_rent_list/{property_id}')
async def property_detail(property_id: str = "O3599084026"):
"""
Parameters:
property_id
Returns:
detailed information about the property
"""
url = os.getenv('url_property_detail')
querystring = {"property_id":property_id}
response_prop_detail = requests.request("GET", url, headers=headers, params=querystring)
return response_prop_detail.json()['properties']
@router.get('/for_sale_list')
async def for_sale_list(api_key = config.settings.api_key,
city = "New York City",
state= "NY",
limit = 4):
url = os.getenv('url_list_for_sale')
querystring = {"city": city ,"limit": limit,"offset":"0","state_code": state,"sort":"relevance"}
response_for_sale = requests.request("GET", url, headers=headers, params=querystring)
return response_for_sale.json()['properties']
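# Hedged usage sketch (not part of the original router): once this APIRouter is
# mounted on a FastAPI app, the rent-list endpoint described in the docstrings
# above can be exercised with a plain HTTP client. The base URL and route
# prefix below are assumptions for illustration only.
if __name__ == '__main__':
    demo_resp = requests.get(
        'http://localhost:8000/streamlined_rent_list',
        params={'city': 'New York City', 'state': 'NY', 'prop_type': 'condo', 'limit': 2},
    )
    for listing in demo_resp.json():
        print(listing['address'], listing['walk_score'])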
| 2.53125 | 3 |
dist/weewx-4.0.0b3/bin/weewx/junk2.py | v0rts/docker-weewx | 10 | 4374 | from __future__ import print_function
import time
import weeutil.weeutil
import weewx.manager
import weewx.xtypes
archive_sqlite = {'database_name': '/home/weewx/archive/weepwr.sdb', 'driver': 'weedb.sqlite'}
archive_mysql = {'database_name': 'weewx', 'user': 'weewx', 'password': '<PASSWORD>', 'driver': 'weedb.mysql'}
sql_str = "SELECT %s(%s), MIN(usUnits), MAX(usUnits) FROM %s " \
"WHERE dateTime > ? AND dateTime <= ?" % ('avg', 'outTemp', 'archive')
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573246800)
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573245000 + 600)
print('timespan=', timespan)
with weewx.manager.Manager.open(archive_sqlite) as db_manager:
interpolate_dict = {
'aggregate_type': 'diff',
'obs_type': 'ch8_a_energy2',
'table_name': db_manager.table_name,
'start': timespan.start,
'stop': timespan.stop,
}
SQL_TEMPLATE = "SELECT (ch8_a_energy2 - (SELECT ch8_a_energy2 FROM archive WHERE dateTime=%(start)s)) / (%(stop)s - %(start)s) FROM archive WHERE dateTime=%(stop)s;"
SQL_TEMPLATE = """Select a.dateTime as StartTime
, b.dateTime as EndTime
, b.dateTime-a.dateTime as TimeChange
, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a
Inner Join archive b ON b.dateTime>=1573245000 AND b.dateTime<=(1573245000 + 600)"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(c.dateTime) FROM archive c WHERE c.dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(dateTime) FROM archive WHERE dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) "\
"FROM archive a, archive b "\
"WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive WHERE dateTime <= %(stop)s) "\
"AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime >= %(start)s);"
sql_stmt = SQL_TEMPLATE % interpolate_dict
print(sql_stmt)
# Get the number of records
with db_manager.connection.cursor() as cursor:
for row in cursor.execute(sql_stmt):
print(row)
| 2.328125 | 2 |
fast_lemon_api_test.py | a6502/fast_lemon_api | 0 | 4375 | <gh_stars>0
#!/usr/bin/env pytest-3
from fastapi.testclient import TestClient
from fast_lemon_api import app
client = TestClient(app)
def test_get_root():
response = client.get("/")
assert response.status_code == 200
assert response.text == "Welcome to the fast-lemon-api!\n"
neworder = {
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
"status": "open"
}
order_id = None
def test_post_orders1():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
})
assert response.status_code == 201
j = response.json()
#print(repr(j))
order_id = j.pop('uuid')
assert j == neworder
#assert 0
def test_post_orders2():
response = client.post('/orders/',
json={
"isin": "blablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'isin'],
'msg': 'ensure this value has at least 12 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 12
}
}]
}
def test_post_orders3():
response = client.post('/orders/',
json={
"isin": "blablablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 12
},
'loc': ['body', 'isin'],
'msg': 'ensure this value has at most 12 characters',
'type': 'value_error.any_str.max_length'
}]
}
def test_post_orders4():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": -1,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'limit_price'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders5():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "BUY!",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'enum_values': ['buy', 'sell']
},
'loc': ['body', 'side'],
'msg':
"value is not a valid enumeration member; permitted: 'buy', 'sell'",
'type': 'type_error.enum'
}]
}
def test_post_orders6():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.33333,
"side": "SELL",
"quantity": 0,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'quantity'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders8():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 1.1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'quantity'],
'msg': 'value is not a valid integer',
'type': 'type_error.integer'
}]
}
def test_post_orders7():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 2,
"valid_until": 1996
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'valid_until'],
'msg': 'valid_until cannot be in the past',
'type': 'value_error'
}]
}
| 2.640625 | 3 |
tests/regenerate_credentials.py | andrewkozlik/pam-u2f | 0 | 4376 | #!/bin/python2
import collections
import re
import subprocess
import sys
PUC = "../pamu2fcfg/pamu2fcfg"
resident = ["", "-r"]
presence = ["", "-P"]
pin = ["", "-N"]
verification = ["", "-V"]
Credential = collections.namedtuple("Credential", "keyhandle pubkey attributes oldformat")
sshformat = 0
def print_test_case(filename, sshformat, credentials):
start = """
cfg.auth_file = "{authfile}";
cfg.sshformat = {ssh};
rc = get_devices_from_authfile(&cfg, username, dev, &n_devs);
assert(rc == 1);
assert(n_devs == {devices});
"""
checks = """
assert(strcmp(dev[{i}].coseType, "es256") == 0);
assert(strcmp(dev[{i}].keyHandle, "{kh}") == 0);
assert(strcmp(dev[{i}].publicKey, "{pk}") == 0);
assert(strcmp(dev[{i}].attributes, "{attr}") == 0);
assert(dev[{i}].old_format == {old});
"""
free = """
free(dev[{i}].coseType);
free(dev[{i}].attributes);
free(dev[{i}].keyHandle);
free(dev[{i}].publicKey);
"""
end = """
memset(dev, 0, sizeof(dev_t) * {devices});
"""
code = ""
free_block = ""
code += start.format(authfile = filename, ssh = sshformat, devices = len(credentials))
for c, v in enumerate(credentials):
code += checks.format(i = c, kh = v.keyhandle, pk = v.pubkey, attr = v.attributes, old = v.oldformat)
free_block += free.format(i = c)
code += free_block + end.format(devices = len(credentials))
print(code)
# Single credentials
print >> sys.stderr, "Generating single credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Double credentials
print >> sys.stderr, "Generating double credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_double_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Mixed credentials
print >> sys.stderr, "Mixed double credentials"
options = [("", ""), ("", "-P"), ("-P", ""), ("-P", "-P")]
for p1, p2 in options:
filename = "credentials/new_mixed_" + p1 +"1" + p2 + "2"
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", p1])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", p2])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
| 2.21875 | 2 |
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py | zhangyujing/tensorflow | 13 | 4377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineScalarBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.test_session():
mu = -1.
# scale corresponds to 1.
bijector = AffineScalar(shift=mu)
self.assertEqual("affine_scalar", bijector.name)
def testNoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = AffineScalar(shift=mu, scale=2.)
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose([-np.log(2.)] * 3,
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = AffineScalar(scale=multiplier)
x = np.float64([1.]) # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose([np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose([0., 0.], run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaScale(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu, scale=[2., 1])
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(2), 0.], run(bijector.inverse_log_det_jacobian, x))
def testScalarCongruency(self):
with self.test_session():
bijector = AffineScalar(shift=3.6, scale=0.42)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
| 2.140625 | 2 |
mule/util/algorand_util.py | bricerisingalgorand/mule | 0 | 4378 | <reponame>bricerisingalgorand/mule
import os
import subprocess
import json
import urllib.request
from mule.util import os_util
from mule.util import file_util
from mule.util import time_util
from mule.util import s3_util
from mule.util import semver_util
import platform
def build_algo_release_url(package_type, channel, os_type, cpu_arch_type, package_version):
return f"https://algorand-releases.s3.amazonaws.com/channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_{package_version}.tar.gz"
def get_latest_package_version(package_type, channel, os_type, cpu_arch_type):
os_type = os_util.get_os_type()
cpu_arch_type = os_util.get_cpu_arch_type()
package_keys = list(s3_util.get_matching_s3_keys(
'algorand-releases',
f"channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_",
'tar.gz',
s3_auth=False
))
package_versions = list(map(semver_util.parse_version, package_keys))
latest_version = semver_util.get_highest_version(package_versions)
print(f"Found latest version of package type {package_type} for channel {channel}: {latest_version}")
return latest_version
def install_node(data_dir, bin_dir, channel, node_package_version='latest'):
"""
Download and install algod.
"""
node_package_dir = file_util.ensure_folder(f"/tmp/algod-pkg-{time_util.get_timestamp()}")
data_dir = file_util.ensure_folder(data_dir)
bin_dir = file_util.ensure_folder(bin_dir)
os_type = os_util.get_os_type()
cpu_arch_type = os_util.get_cpu_arch_type()
if node_package_version == 'latest':
if channel == 'test':
node_package_version = get_latest_package_version('node', 'stable', os_type, cpu_arch_type)
else:
node_package_version = get_latest_package_version('node', channel, os_type, cpu_arch_type)
print(f"Installing {channel} node package version {node_package_version} to:\n\tbin_dir: {bin_dir}\n\tdata_dir: {data_dir}")
node_package_url = build_algo_release_url('node', channel, os_type, cpu_arch_type, node_package_version)
if channel == 'test':
node_package_url = build_algo_release_url('node', 'stable', os_type, cpu_arch_type, node_package_version)
node_package_tar_path = f"{node_package_dir}/node_package.tar.gz"
_ = urllib.request.urlretrieve(node_package_url, node_package_tar_path)
file_util.decompressTarfile(node_package_tar_path, f"{node_package_dir}")
file_util.mv_folder_contents(f"{node_package_dir}/data", data_dir)
file_util.mv_folder_contents(f"{node_package_dir}/bin", bin_dir)
if channel == 'stable':
file_util.copy_file(
os.path.join(node_package_dir, "genesis/mainnet/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
else:
file_util.copy_file(
os.path.join(node_package_dir, f"genesis/{channel}net/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
def show_node_configs(data_dir, kmd_dir):
data_dir = file_util.ensure_folder(data_dir)
kmd_dir = file_util.ensure_folder(kmd_dir)
node_config_path = f"{data_dir}/config.json"
kmd_config_path = f"{kmd_dir}/kmd_config.json"
file_util.ensure_file(node_config_path, '{}')
file_util.ensure_file(kmd_config_path, '{}')
current_node_config = file_util.read_json_file(node_config_path)
current_kmd_config = file_util.read_json_file(kmd_config_path)
print(f"Showing node configs at {node_config_path} with:\n{json.dumps(current_node_config, sort_keys=True, indent=4)}")
print(f"Showing node configs at {kmd_config_path} with:\n{json.dumps(current_kmd_config, sort_keys=True, indent=4)}")
def configure_node(data_dir, kmd_dir, node_config, kmd_config):
data_dir = file_util.ensure_folder(data_dir)
kmd_dir = file_util.ensure_folder(kmd_dir)
node_config_path = f"{data_dir}/config.json"
kmd_config_path = f"{kmd_dir}/kmd_config.json"
file_util.ensure_file(node_config_path, '{}')
file_util.ensure_file(kmd_config_path, '{}')
current_node_config = file_util.read_json_file(node_config_path)
current_kmd_config = file_util.read_json_file(kmd_config_path)
current_node_config.update(node_config)
current_kmd_config.update(kmd_config)
print(f"Updating node configs at {node_config_path} with:\n{json.dumps(node_config, sort_keys=True, indent=4)}")
print(f"Updating node configs at {kmd_config_path} with:\n{json.dumps(kmd_config, sort_keys=True, indent=4)}")
file_util.write_json_file(node_config_path, current_node_config)
file_util.write_json_file(kmd_config_path, current_kmd_config)
def start_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'start',
]
print(f"Starting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def stop_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'stop',
]
print(f"Stopping node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def restart_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'restart',
]
print(f"Restarting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def status_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'status',
]
print(f"Status of node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def goal(data_dir, kmd_dir, args, bin_dir=None):
goal_command = ['goal']
if not bin_dir is None:
goal_command = [f"{bin_dir}/goal"]
goal_command.extend([
'-d', data_dir,
'-k', kmd_dir,
])
goal_command.extend(args)
subprocess.run(goal_command, check=True)
def algorand_indexer(args, bin_dir=None, log_file_name=None):
algorand_indexer_command = ['algorand-indexer']
if not bin_dir is None:
algorand_indexer_command = [f"{bin_dir}/algorand-indexer"]
if log_file_name is None:
log_file_name = f"indexer-{time_util.get_timestamp()}.log"
algorand_indexer_command.extend(args)
log_file = open(log_file_name, 'w')
subprocess.Popen(algorand_indexer_command, stdout=log_file, stderr=log_file)
def start_indexer_local_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None):
algorand_indexer_args = ['daemon']
algorand_indexer_args.extend([
'-d', node['data'],
'--postgres', build_indexer_postgress_connection_string(postgres)
])
if not pid_file is None:
algorand_indexer_args.extend([
'--pidfile', pid_file
])
algorand_indexer(algorand_indexer_args, bin_dir, log_file_name)
def start_indexer_remote_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None):
algorand_indexer_args = ['daemon']
algorand_indexer_args.extend([
'--algod-net', f"{node['host']}:{node['port']}",
'--algod-token', node['token'],
'--genesis', node['genesis'],
'--postgres', build_indexer_postgress_connection_string(postgres)
])
if not pid_file is None:
algorand_indexer_args.extend([
'--pidfile', pid_file
])
algorand_indexer(algorand_indexer_args, bin_dir, log_file_name)
def build_indexer_postgress_connection_string(postgres):
postgress_connection_string = []
for field in postgres.items():
postgress_connection_string.append(f"{field[0]}={field[1]}")
return ' '.join(postgress_connection_string)
| 2.09375 | 2 |
examples/showcase/src/demos_panels/scrollPanel.py | allbuttonspressed/pyjs | 0 | 4379 | """
The ``ui.ScrollPanel`` class implements a panel that scrolls its contents.
If you want the scroll bars to be always visible, call
``setAlwaysShowScrollBars(True)``. You can also change the current scrolling
position programmatically by calling ``setScrollPosition(vPos)`` and
``setScrollHorizontalPosition(hPos)`` to change the vertical and horizontal
scrolling position, respectively. (A short usage sketch of these calls is
appended at the end of this file.)
By its nature, a ScrollPanel will not work if you give it a relative size,
which makes it tricky to use where you want it to fill a parent widget of unknown size.
To avoid this problem, wrap its content in a SimplePanel and
then use CSS overflow to control its behaviour, as shown in the second example:
"container" represents the parent widget, which can have any absolute or relative size,
and the SuperScrollPanel will fill it out and apply vertical scrollbars if needed.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
class ScrollPanelDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
vert = VerticalPanel()
vert.setSpacing("10px")
self.add(vert)
panel = ScrollPanel(Size=("300px", "100px"))
contents = HTML("<b>Tao Te Ching, Chapter One</b><p>" +
"The Way that can be told of is not an unvarying " +
"way;<p>The names that can be named are not " +
"unvarying names.<p>It was from the Nameless that " +
"Heaven and Earth sprang;<p>The named is but the " +
"mother that rears the ten thousand creatures, " +
"each after its kind.")
panel.add(contents)
vert.add(panel)
container = SimplePanel(Width="400px", Height="200px")
contents2 = HTML(50*"Dont forget to grab the css for SuperScrollPanel in Showcase.css! ")
panel2 = SuperScrollPanel(contents2)
container.add(panel2)
vert.add(container)
class SuperScrollPanel(ScrollPanel):
def __init__(self, panel):
ScrollPanel.__init__(self)
self.setHeight("100%")
self.setStyleName("SuperScrollPanelOuter")
self.inner = SimplePanel(Height="100%")
self.add(self.inner)
self.inner.setStyleName("SuperScrollPanelInner")
self.inner.add(panel)
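# Hedged usage sketch (not part of the original demo): the scroll-position API
# described in the module docstring, exercised on a throwaway panel. This
# assumes ScrollPanel exposes setAlwaysShowScrollBars, setScrollPosition and
# setScrollHorizontalPosition exactly as documented above.
def _scroll_position_example():
    panel = ScrollPanel(Size=("200px", "80px"))
    panel.add(HTML(20 * "scrollable content "))
    panel.setAlwaysShowScrollBars(True)    # keep both scroll bars visible
    panel.setScrollPosition(40)            # scroll 40 pixels down
    panel.setScrollHorizontalPosition(0)   # stay at the left edge
    return panel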
| 3.71875 | 4 |
psdn.py | xiongchiamiov/phone-suitable-domain-name | 3 | 4380 | <filename>psdn.py
#!/usr/bin/env python3
# May you recognize your weaknesses and share your strengths.
# May you share freely, never taking more than you give.
# May you find love and love everyone you find.
import re
import time
import whois
phone_spellable = re.compile(r'^[filoqrsuwxy]+$')
candidate_words = []
with open('/usr/share/dict/words') as f:
for word in f:
word = word.strip()
if phone_spellable.match(word):
candidate_words.append((len(word), word))
candidate_words.sort()
for _length, word in candidate_words:
    # Retry the whois lookup until it returns; lookups can fail transiently
    # (e.g. rate limiting), so back off for five seconds and try again.
    query = False
    while query is False:
        try:
            query = whois.query('%s.com' % word)
        except Exception:
            print("Sleeping five seconds...")
            time.sleep(5)
    # whois.query returns a falsy value when the domain is not registered,
    # i.e. the name is available to claim.
    if not query:
        print(word)
| 2.8125 | 3 |
src/requester/py/ElevatorTestCaseList.py | akzare/Elevator_Sys_Design | 1 | 4381 | '''
* @file ElevatorTestCaseList.py
* @author <NAME>
* @date 30 July 2020
* @version 0.1
* @brief Implements a class to hold all the test cases during the program life cycle.
'''
#!/usr/bin/env python3
import sys
import ctypes
import ElevatorConfig as cfg
import ElevatorMsgProtocol as msgProto
class ElevatorTestCaseList:
'''
This class builds a test case list out of the configuration
and holds it during the runtime
'''
def __init__(self, config):
self.config = config
self.CallGoTCList = []
def create_testcase_list(self):
'''
Creates a test case list out of the configuration
'''
# ############################################################
# Construct 'call' test cases
for k in self.config.test_case['call'].keys():
msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['call'][k][0],
rx_node_addr = self.config.test_case['call'][k][1],
msg_id = self.config.test_case['call'][k][2],
msg_class = self.config.test_case['call'][k][3],
hdr_len = self.config.network['packet_header_len'],
payload_len = self.config.network['packet_payload_req_len'])
self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr,
time_tag = self.config.test_case['call'][k][4],
req_typ = self.config.usr_request['call'],
floor_num = self.config.test_case['call'][k][5],
direction = self.config.test_case['call'][k][6],
go_msg_id = self.config.test_case['call'][k][7],
state = msgProto.CallGoState.READY2GO))
# ############################################################
# Construct 'go' test cases
for k in self.config.test_case['go'].keys():
msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['go'][k][0],
rx_node_addr = self.config.test_case['go'][k][1],
msg_id = self.config.test_case['go'][k][2],
msg_class = self.config.test_case['go'][k][3],
hdr_len = self.config.network['packet_header_len'],
payload_len = self.config.network['packet_payload_req_len'])
self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr,
time_tag = self.config.test_case['go'][k][4],
req_typ = self.config.usr_request['go'],
floor_num = self.config.test_case['go'][k][5],
direction = 0,
go_msg_id = 0,
state = msgProto.CallGoState.RESET))
| 2.6875 | 3 |
cart/views.py | pmaigutyak/mp-cart | 1 | 4382 | <gh_stars>1-10
from django.utils.translation import ugettext
from django.views.decorators.http import require_POST
from django.http import JsonResponse
from django.shortcuts import render
from django.core.exceptions import ValidationError
from django.views.decorators.csrf import csrf_exempt
from cart.lib import get_cart
from cart.forms import SelectProductForm, SetQtyForm
@require_POST
def _cart_action_view(request, action_factory, form_class, message):
form = form_class(data=request.POST)
if not form.is_valid():
return JsonResponse({'message': form.errors.as_json()}, status=403)
cart = get_cart(request)
try:
result = action_factory(cart, form.cleaned_data)
except ValidationError as e:
return JsonResponse({'message': ', '.join(e.messages)}, status=403)
return JsonResponse({
'message': message,
'result': result,
'total': cart.printable_total
})
def add(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.add(**data),
form_class=SelectProductForm,
message=ugettext('Product added to cart')
)
def remove(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.remove(**data),
form_class=SelectProductForm,
message=ugettext('Product removed from cart')
)
def get_modal(request):
cart = get_cart(request)
return render(request, 'cart/modal.html', {'cart': cart})
@csrf_exempt
def set_qty(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.set_qty(**data),
form_class=SetQtyForm,
message=ugettext('Quantity updated')
)
| 1.96875 | 2 |
ChessAI/src/const.py | darius-luca-tech/AI_Projects | 2 | 4383 |
#------ game constants -----#
#players
WHITE = 0
BLACK = 1
BOTH = 2
#color for onTurnLabel
PLAYER_COLOR = ["white", "black"]
#figures
PAWN = 1
KNIGHT = 2
BISHOP = 3
ROOK = 4
QUEEN = 5
KING = 6
FIGURE_NAME = [ "", "pawn", "knight", "bishop", "rook", "queen", "king" ]
#used in move 32bit for promotion figure prom_figure = figure-2
PROM_KNIGHT = 0
PROM_BISHOP = 1
PROM_ROOK = 2
PROM_QUEEN = 3
#all lines
A, B, C, D, E, F, G, H = range(8)
#all squares
A1, B1, C1, D1, E1, F1, G1, H1, \
A2, B2, C2, D2, E2, F2, G2, H2, \
A3, B3, C3, D3, E3, F3, G3, H3, \
A4, B4, C4, D4, E4, F4, G4, H4, \
A5, B5, C5, D5, E5, F5, G5, H5, \
A6, B6, C6, D6, E6, F6, G6, H6, \
A7, B7, C7, D7, E7, F7, G7, H7, \
A8, B8, C8, D8, E8, F8, G8, H8 = range(64)
#----- game display constants -----#
DEFAULTBORDERWIDTH = 20
DEFAULTTILEWIDTH = 45
DEFAULTFONTSIZE = (7, 15)
COLORS = { "bg":"#EDC08C",
"border":"#B55602",
"tiles":("#FC9235", "#FFB87A") }
#----- move types -----#
NORMAL_MOVE, CAPTURE, PROMOTION, DOUBLE_STEP, ENPASSANT_CAPTURE, CASTLING, KING_CAPTURE = range(7)
#----- move 32bit reservation -----#
# a single move is stored in 32 bit as follows
#   xxxxxxxx xx x xxx xxx xxxxxx xxxxxx xxx
#   (unused) G  F E   D   C      B      A
#
# A: move type (0-6)
# B: start sq (0-63)
# C: destination sq (0-63)
# D: start figure (1-6)
# E: captured figure (1-6)
# F: color of moved piece (0-1)
# G: promotion figure (0-3)
#NAME = (start_bit, lenght)
MOVE_TYPE = (0, 3)
MOVE_START = (3, 6)
MOVE_DEST = (9, 6)
MOVE_FIG_START = (15, 3)
MOVE_FIG_CAPTURE = (18, 3)
MOVE_COLOR = (21, 1)
MOVE_PROM = (22, 2)
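# Illustrative sketch (not part of the original engine): one way the
# (start_bit, length) pairs above could be used to pack and unpack a move
# word. The helper names are hypothetical and exist only to document the
# bit layout described in the comment block above.
def _pack_move_field(move, field, value):
    start, length = field
    return move | ((value & ((1 << length) - 1)) << start)
def _unpack_move_field(move, field):
    start, length = field
    return (move >> start) & ((1 << length) - 1)
# Example: a white pawn on E4 captures a knight on D5.
_example_move = 0
for _field, _value in ((MOVE_TYPE, CAPTURE), (MOVE_START, E4), (MOVE_DEST, D5),
                       (MOVE_FIG_START, PAWN), (MOVE_FIG_CAPTURE, KNIGHT),
                       (MOVE_COLOR, WHITE)):
    _example_move = _pack_move_field(_example_move, _field, _value)
assert _unpack_move_field(_example_move, MOVE_START) == E4
assert _unpack_move_field(_example_move, MOVE_DEST) == D5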
#----- castling -----#
CASTLING_LEFT = 0
CASTLING_RIGHT = 1
#----- player status -----#
IDELING = 0
PICKING = 1
INF = 1000000
ASCII_FIG = [[],[]]
ASCII_FIG[WHITE] = [ 'x', chr(9817), chr(9816), chr(9815), chr(9814), chr(9813), chr(9812)]
ASCII_FIG[BLACK] = [ 'x', chr(9823), chr(9822), chr(9821), chr(9820), chr(9819), chr(9818)]
#AI constants
CASTLING_RIGHT_LOSS_PENALTY = -40
| 2.109375 | 2 |
agent.py | kapzlok2408/Pokemon-Showdown-Node-Bot | 0 | 4384 | <reponame>kapzlok2408/Pokemon-Showdown-Node-Bot<gh_stars>0
import gym
import gym_pokemon
import random
if __name__ == "__main__":
env = gym.make("Pokemon-v0")
total_reward = 0.0
total_steps = 0
obs = env.reset()
while True:
action = random.randint(-1,8)
obs, reward, done, _ = env.step(action)
total_reward += reward
total_steps += 1
print("Currently %d steps, total reward of %.2f" % (total_steps, total_reward))
if done:
break
| 2.546875 | 3 |
Curso-Em-Video-Python/Mundo-2/EXs/EX038.py | victor-da-costa/Aprendendo-Python | 0 | 4385 | <reponame>victor-da-costa/Aprendendo-Python<filename>Curso-Em-Video-Python/Mundo-2/EXs/EX038.py
num1 = int(input('Digite o 1º número: '))
num2 = int(input('Digite o 2º número: '))
if num1 > num2:
print('O {} é maior que {}'.format(num1, num2))
elif num1 < num2:
    print('O {} é maior que {}'.format(num2, num1))
else:
print('Os números são iguais')
| 4 | 4 |
setup.py | danjjl/ipyfilechooser | 0 | 4386 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
def read(fname):
"""Open files relative to package."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='ipyfilechooser',
version='0.3.1',
author='<NAME> (@crahan)',
author_email='<EMAIL>',
description=(
'Python file chooser widget for use in '
'Jupyter/IPython in conjunction with ipywidgets'
),
long_description=read('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/crahan/ipyfilechooser',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
],
install_requires=[
'ipywidgets'
]
)
| 1.6875 | 2 |
appengine/chromium_build_logs/handler.py | mithro/chromium-infra | 1 | 4387 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import appengine_config
import datetime
import json
import logging
import os.path
import pickle
import sys
import urllib
sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'third_party'))
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import cloudstorage
import app
import gtest_parser
# pylint: disable=pointless-string-statement
"""When displaying a list of results, how many to display on one page."""
PAGE_SIZE = 100
def _clean_int(value, default):
"""Convert a value to an int, or the default value if conversion fails."""
try:
return int(value)
except (TypeError, ValueError), _:
return default
class MyRequestHandler(webapp.RequestHandler):
"""Base request handler with this application specific helpers."""
def _render_template(self, name, values):
"""
Wrapper for template.render that updates response
and knows where to look for templates.
"""
self.response.out.write(template.render(
os.path.join(os.path.dirname(__file__), 'templates', name),
values))
class StatusReceiverAction(MyRequestHandler):
def post(self):
# This handler should be extremely fast so that buildbot doesn't fail
# the push and doesn't get stuck on us. Defer all processing to the
# background.
try:
deferred.defer(app.process_status_push, self.request.body, _queue='fast')
except Exception:
# For large requests we have to do it now. We can't return HTTP 500
# because buildbot will try again.
app.process_status_push(self.request.body)
class FetchBuildersAction(MyRequestHandler):
def get(self):
deferred.defer(app.fetch_builders)
class FetchStepsAction(MyRequestHandler):
def get(self):
deferred.defer(app.fetch_steps)
class UpdateParsedDataAction(MyRequestHandler):
def get(self):
query = app.BuildStep.all(keys_only=True)
query.filter('is_fetched =', True)
query.filter('is_too_large =', False)
deferred.defer(app.for_all_entities,
query,
app.update_parsed_data,
None)
class MainAction(MyRequestHandler):
def get(self):
self._render_template('main.html', {})
class GTestQueryAction(MyRequestHandler):
def get(self):
gtest_results = []
cursor = None
if self.request.get('gtest_query'):
query = app.GTestResult.all()
query.filter('fullname =', self.request.get('gtest_query'))
query.order('-time_finished')
if self.request.get('cursor'):
query.with_cursor(start_cursor=self.request.get('cursor'))
gtest_results = query.fetch(PAGE_SIZE)
cursor = query.cursor()
self._render_template('query.html', {
'gtest_query': self.request.get('gtest_query'),
'cursor': cursor,
'gtest_results': gtest_results,
})
class SuppressionQueryAction(MyRequestHandler):
def get(self):
query = app.MemorySuppressionResult.all()
query.filter('name =', self.request.get('suppression_query'))
query.order('-time_finished')
if self.request.get('cursor'):
query.with_cursor(start_cursor=self.request.get('cursor'))
suppression_results = query.fetch(PAGE_SIZE)
self._render_template('suppression_query.html', {
'suppression_query': self.request.get('suppression_query'),
'cursor': query.cursor(),
'suppression_results': suppression_results,
})
class UnusedSuppressionsAction(MyRequestHandler):
def post(self):
now_timestamp = datetime.datetime.now()
queries = []
for line in self.request.body.splitlines():
query = app.MemorySuppressionResult.all()
query.filter('name =', line)
query.order('-time_finished')
queries.append(query.run(limit=1))
for q in queries:
for sr in q:
if now_timestamp - sr.time_finished > datetime.timedelta(days=30):
self.response.out.write(sr.name + '\n')
class ListAction(MyRequestHandler):
"""Lists stored build results."""
def get(self):
all_steps = app.BuildStep.all().order('-time_finished')
if self.request.get('buildbot_root'):
all_steps.filter('buildbot_root =',
urllib.unquote(self.request.get('buildbot_root')))
if self.request.get('builder'):
all_steps.filter('builder =',
urllib.unquote(self.request.get('builder')))
if self.request.get('step_name'):
all_steps.filter('step_name =',
urllib.unquote(self.request.get('step_name')))
if self.request.get('status'):
all_steps.filter('status =', _clean_int(urllib.unquote(
self.request.get('status')), None))
if self.request.get('cursor'):
all_steps.with_cursor(start_cursor=self.request.get('cursor'))
steps = all_steps.fetch(limit=PAGE_SIZE)
step_names = app.iterate_large_result(app.StepName.all().order('name'))
self._render_template('list.html', {
'buildbot_roots': app.BUILDBOT_ROOTS,
'step_names': step_names,
'steps': steps,
'cursor': all_steps.cursor(),
'filter_buildbot_root': self.request.get('buildbot_root', ''),
'filter_builder': self.request.get('builder', ''),
'filter_step_name': self.request.get('step_name', ''),
'filter_status': self.request.get('status', ''),
})
class BuildStepJSONAction(MyRequestHandler):
def get(self):
all_steps = app.BuildStep.all().order('-time_finished')
if self.request.get('cursor'):
all_steps.with_cursor(start_cursor=self.request.get('cursor'))
build_steps = all_steps.fetch(limit=1000)
json_data = {
'build_steps': [
{
'build_number': bs.build_number,
'buildbot_root': bs.buildbot_root,
'builder': bs.builder,
'status': bs.status,
'step_number': bs.step_number,
'step_name': bs.step_name,
# BigQuery doesn't recognize the T separator, but space works.
'time_started': bs.time_started.isoformat(sep=' '),
'time_finished': bs.time_finished.isoformat(sep=' '),
} for bs in build_steps
],
'cursor': all_steps.cursor(),
}
self.response.out.write(json.dumps(json_data))
class SuppressionSummaryAction(MyRequestHandler):
"""Displays summary information about memory suppressions."""
def get(self):
sort = 'count'
if self.request.get('sort') in ('count',):
sort = self.request.get('sort')
query = app.MemorySuppressionSummary.all()
monthly_timestamp = datetime.date.today().replace(day=1)
query.filter('monthly_timestamp =', monthly_timestamp)
query.order('monthly_timestamp')
query.order('-%s' % sort)
if self.request.get('cursor'):
query.with_cursor(start_cursor=self.request.get('cursor'))
suppression_summaries = query.fetch(PAGE_SIZE)
self._render_template('suppression_summary.html', {
'suppression_summary_query':
self.request.get('suppression_summary_query'),
'suppression_summaries': suppression_summaries,
'cursor': query.cursor(),
'sort': sort,
})
class ViewRawLogAction(blobstore_handlers.BlobstoreDownloadHandler):
"""Sends selected log file to the user."""
def get(self, blobkey): # pylint: disable=arguments-differ
blob_info = blobstore.BlobInfo.get(urllib.unquote(blobkey))
if not blob_info:
self.error(404)
return
self.send_blob(blob_info)
application = webapp.WSGIApplication(
[('/', MainAction),
('/gtest_query', GTestQueryAction),
('/suppression_query', SuppressionQueryAction),
('/suppression_summary', SuppressionSummaryAction),
('/unused_suppressions', UnusedSuppressionsAction),
('/list', ListAction),
('/build_step_json', BuildStepJSONAction),
('/status_receiver', StatusReceiverAction),
('/tasks/fetch_builders', FetchBuildersAction),
('/tasks/fetch_steps', FetchStepsAction),
('/tasks/update_parsed_data', UpdateParsedDataAction),
('/viewlog/raw/(.*)', ViewRawLogAction)])
def main():
my_default_retry_params = cloudstorage.RetryParams(
initial_delay=0.5,
max_delay=30.0,
backoff_factor=2,
urlfetch_timeout=60)
cloudstorage.set_default_retry_params(my_default_retry_params)
run_wsgi_app(application)
if __name__ == '__main__':
main()
| 2.015625 | 2 |
aws_lambda/pytorch/source/caffe2/python/operator_test/elementwise_op_broadcast_test.py | YevhenVieskov/ML-DL-in-production | 4 | 4388 | <reponame>YevhenVieskov/ML-DL-in-production
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
# TODO(jiayq): make them hypothesis tests for better coverage.
class TestElementwiseBroadcast(serial.SerializedTestCase):
@given(**hu.gcs)
def test_broadcast_Add(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(4, 5).astype(np.float32)
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X + Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3, 4).astype(np.float32)
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X + Y[:, :, np.newaxis])
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
# broadcasting the first dimension
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(2).astype(np.float32)
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=0)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X + Y[:, np.newaxis, np.newaxis, np.newaxis])
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
# broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1, 4, 1).astype(np.float32)
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X + Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(**hu.gcs)
def test_broadcast_Mul(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(4, 5).astype(np.float32)
op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X * Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3, 4).astype(np.float32)
op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X * Y[:, :, np.newaxis])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting the first dimension
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(2).astype(np.float32)
op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=0)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X * Y[:, np.newaxis, np.newaxis, np.newaxis])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1, 4, 1).astype(np.float32)
op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X * Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(**hu.gcs)
def test_broadcast_Sub(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(4, 5).astype(np.float32)
op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X - Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3, 4).astype(np.float32)
op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X - Y[:, :, np.newaxis])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting the first dimension
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(2).astype(np.float32)
op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=0)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X - Y[:, np.newaxis, np.newaxis, np.newaxis])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1, 4, 1).astype(np.float32)
op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X - Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@serial.given(**hu.gcs)
def test_broadcast_powt(self, gc, dc):
np.random.seed(101)
#operator
def powt_op(X, Y):
return [np.power(X, Y)]
#two gradients Y*X^(Y-1) and X^Y * ln(X)
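        # (equivalently, for Z = X**Y: dZ/dX = Y * X**(Y - 1) and
        #  dZ/dY = X**Y * ln(X) = Z * ln(X), which is the form used below)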
def powt_grad(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
Z = outputs[0]
return ([Y * np.power(X, Y - 1), Z * np.log(X)] * g_out)
#1. Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0
Y = np.random.rand(4, 5).astype(np.float32) + 2.0
#two gradients Y*X^(Y-1) and X^Y * ln(X)
        # latter gradient is summed over 1 and 0 dims to account for broadcast
def powt_grad_broadcast(g_out, outputs, fwd_inputs):
[GX, GY] = powt_grad(g_out, outputs, fwd_inputs)
return ([GX, np.sum(np.sum(GY, 1), 0)])
op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op,
output_to_grad="Z",
grad_reference=powt_grad_broadcast)
#2. broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0
Y = np.random.rand(3, 4).astype(np.float32) + 2.0
#pow op with the latter array increased by one dim
def powt_op_axis1(X, Y):
return powt_op(X, Y[:, :, np.newaxis])
#two gradients Y*X^(Y-1) and X^Y * ln(X)
        # latter gradient is summed over 3 and 0 dims to account for broadcast
def powt_grad_axis1(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
[GX, GY] = powt_grad(g_out, outputs, [X, Y[:, :, np.newaxis]])
return ([GX, np.sum(np.sum(GY, 3), 0)])
op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=1)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op_axis1,
output_to_grad="Z",
grad_reference=powt_grad_axis1)
#3. broadcasting the first dimension
X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0
Y = np.random.rand(2).astype(np.float32) + 2.0
#pow op with the latter array increased by one dim
def powt_op_axis0(X, Y):
return powt_op(X, Y[:, np.newaxis, np.newaxis, np.newaxis])
#two gradients Y*X^(Y-1) and X^Y * ln(X)
        # latter gradient is summed over 3, 2 and 1 dims to account for broadcast
def powt_grad_axis0(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
[GX, GY] = powt_grad(g_out,
outputs,
[X, Y[:, np.newaxis, np.newaxis, np.newaxis]])
return ([GX, np.sum(np.sum(np.sum(GY, 3), 2), 1)])
op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=0)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op_axis0,
output_to_grad="Z",
grad_reference=powt_grad_axis0)
#4. broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0
Y = np.random.rand(1, 4, 1).astype(np.float32) + 2.0
#pow op with the latter array increased by one dim
def powt_op_mixed(X, Y):
return powt_op(X, Y[np.newaxis, :, :, :])
#two gradients Y*X^(Y-1) and X^Y * ln(X)
        # latter gradient is summed over 0 and 1 dims to account for broadcast
def powt_grad_mixed(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
[GX, GY] = powt_grad(g_out, outputs, [X, Y[np.newaxis, :, :, :]])
return ([GX, np.reshape(np.sum(np.sum(np.sum(GY, 3), 1), 0),
(1, 4, 1))])
op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=1)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op_mixed,
output_to_grad="Z",
grad_reference=powt_grad_mixed)
@given(**hu.gcs)
def test_broadcast_scalar(self, gc, dc):
# broadcasting constant
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1).astype(np.float32)
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X + Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting scalar
X = np.random.rand(1).astype(np.float32)
Y = np.random.rand(1).astype(np.float32).reshape([])
op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X + Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
@given(**hu.gcs)
def test_semantic_broadcast(self, gc, dc):
# NCHW as default
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3).astype(np.float32)
op = core.CreateOperator(
"Add", ["X", "Y"], "out", broadcast=1, axis_str="C")
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(
out, X + Y[:, np.newaxis, np.newaxis])
self.assertDeviceChecks(dc, op, [X, Y], [0])
# NHWC
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(5).astype(np.float32)
op = core.CreateOperator(
"Add", ["X", "Y"], "out", broadcast=1, axis_str="C", order="NHWC")
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
np.testing.assert_array_almost_equal(out, X + Y)
self.assertDeviceChecks(dc, op, [X, Y], [0])
@given(**hu.gcs)
def test_sum_reduce_empty_blob(self, gc, dc):
net = core.Net('test')
with core.DeviceScope(gc):
net.GivenTensorFill([], ["X"], values=[], shape=[2, 0, 5])
net.GivenTensorFill([], ["Y"], values=[], shape=[2, 0])
net.SumReduceLike(["X", "Y"], "out", axis=0)
workspace.RunNetOnce(net)
@given(**hu.gcs)
def test_sum_reduce(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(4, 5).astype(np.float32)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
res = np.sum(X, axis=0)
res = np.sum(res, axis=0)
np.testing.assert_array_almost_equal(out, res)
self.assertDeviceChecks(dc, op, [X, Y], [0])
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(2, 3).astype(np.float32)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
res = np.sum(X, axis=3)
res = np.sum(res, axis=2)
np.testing.assert_array_almost_equal(out, res, decimal=3)
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3, 4).astype(np.float32)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
res = np.sum(X, axis=0)
res = np.sum(res, axis=2)
np.testing.assert_array_almost_equal(out, res)
self.assertDeviceChecks(dc, op, [X, Y], [0])
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 500).astype(np.float64)
Y = np.random.rand(1).astype(np.float64)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
res = np.array(np.sum(X))
np.testing.assert_array_almost_equal(out, res, decimal=0)
# broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1, 3, 4, 1).astype(np.float32)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(op)
out = workspace.FetchBlob("out")
res = np.sum(X, axis=0)
res = np.sum(res, axis=2).reshape(Y.shape)
np.testing.assert_array_almost_equal(out, res)
self.assertDeviceChecks(dc, op, [X, Y], [0])
# fp64 is not supported with the CUDA op
dc_cpu_only = [d for d in dc if d.device_type != caffe2_pb2.CUDA]
self.assertDeviceChecks(dc_cpu_only, op, [X, Y], [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(**hu.gcs_gpu_only)
def test_sum_reduce_fp16(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float16)
Y = np.random.rand(4, 5).astype(np.float16)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, device_option=gc)
def ref_op(X, Y):
res = np.sum(X, axis=0)
res = np.sum(res, axis=0)
return [res]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=ref_op,
threshold=1e-3)
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.rand(2, 3, 4, 5).astype(np.float16)
Y = np.random.rand(2, 3).astype(np.float16)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0)
def ref_op(X, Y):
res = np.sum(X, axis=3)
res = np.sum(res, axis=2)
return [res]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=ref_op,
threshold=1e-3)
# broadcasting intermediate dimensions
X = np.random.rand(2, 3, 4, 5).astype(np.float16)
Y = np.random.rand(3, 4).astype(np.float16)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1)
def ref_op(X, Y):
res = np.sum(X, axis=0)
res = np.sum(res, axis=2)
return [res]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=ref_op,
threshold=1e-3)
# broadcasting with single elem dimensions at both ends
X = np.random.rand(2, 3, 4, 5).astype(np.float16)
Y = np.random.rand(1, 3, 4, 1).astype(np.float16)
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1)
def ref_op(X, Y):
res = np.sum(X, axis=0)
res = np.sum(res, axis=2)
return [res.reshape(Y.shape)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=ref_op,
threshold=1e-3)
if __name__ == "__main__":
unittest.main()
| 2.234375 | 2 |
kayobe/tests/unit/cli/test_commands.py | jovial/kayobe | 0 | 4389 |
# Copyright (c) 2017 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import cliff.app
import cliff.commandmanager
import mock
from kayobe.cli import commands
from kayobe import utils
class TestApp(cliff.app.App):
def __init__(self):
super(TestApp, self).__init__(
description='Test app',
version='0.1',
command_manager=cliff.commandmanager.CommandManager('kayobe.cli'))
class TestCase(unittest.TestCase):
@mock.patch.object(utils, "galaxy_install", spec=True)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_control_host_bootstrap(self, mock_run, mock_install):
command = commands.ControlHostBootstrap(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
mock_install.assert_called_once_with("requirements.yml",
"ansible/roles")
expected_calls = [
mock.call(mock.ANY, ["ansible/bootstrap.yml"]),
mock.call(mock.ANY, ["ansible/kolla-ansible.yml"],
tags="install"),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(utils, "galaxy_install", spec=True)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_control_host_upgrade(self, mock_run, mock_install):
command = commands.ControlHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
mock_install.assert_called_once_with("requirements.yml",
"ansible/roles", force=True)
expected_calls = [
mock.call(mock.ANY, ["ansible/bootstrap.yml"]),
mock.call(mock.ANY, ["ansible/kolla-ansible.yml"],
tags="install"),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_network_connectivity_check(self, mock_run):
command = commands.NetworkConnectivityCheck(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, ["ansible/network-connectivity.yml"]),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_hypervisor_host_configure(self, mock_run, mock_dump):
command = commands.SeedHypervisorHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = "stack"
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, host="seed-hypervisor",
var_name="kayobe_ansible_user", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/ntp.yml",
"ansible/seed-hypervisor-libvirt-host.yml",
],
limit="seed-hypervisor",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_hypervisor_host_upgrade(self, mock_run):
command = commands.SeedHypervisorHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
],
limit="seed-hypervisor",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure(self, mock_kolla_run, mock_run, mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {"kayobe_ansible_user": "stack"}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, hosts="seed", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/disable-selinux.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/ip-routing.yml",
"ansible/snat.yml",
"ansible/disable-glean.yml",
"ansible/ntp.yml",
"ansible/lvm.yml",
],
limit="seed",
),
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
[
"ansible/kolla-target-venv.yml",
"ansible/kolla-host.yml",
"ansible/docker.yml",
],
limit="seed",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={"ansible_user": "stack"},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_kayobe_venv(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_kolla_venv(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/usr/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_seed_host_configure_both_venvs(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.SeedHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"seed": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_host_upgrade(self, mock_run):
command = commands.SeedHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
],
limit="seed",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_container_image_build(self, mock_run):
command = commands.SeedContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_sets": (
"{{ seed_container_image_sets }}"),
"push_images": False,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_seed_container_image_build_with_regex(self, mock_run):
command = commands.SeedContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_regexes": "'^regex1$ ^regex2$'",
"push_images": True,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_seed")
def test_service_deploy(self, mock_kolla_run, mock_run):
command = commands.SeedServiceDeploy(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
["ansible/kolla-bifrost.yml"],
),
mock.call(
mock.ANY,
[
"ansible/overcloud-host-image-workaround-resolv.yml",
"ansible/seed-introspection-rules.yml",
"ansible/dell-switch-bmp.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"deploy-bifrost",
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure(self, mock_kolla_run, mock_run,
mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {"kayobe_ansible_user": "stack"}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(mock.ANY, hosts="overcloud", tags="dump-config")
]
self.assertEqual(expected_calls, mock_dump.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/ip-allocation.yml",
"ansible/ssh-known-host.yml",
"ansible/kayobe-ansible-user.yml",
"ansible/kayobe-target-venv.yml",
"ansible/users.yml",
"ansible/yum.yml",
"ansible/dev-tools.yml",
"ansible/disable-selinux.yml",
"ansible/network.yml",
"ansible/sysctl.yml",
"ansible/disable-glean.yml",
"ansible/disable-cloud-init.yml",
"ansible/ntp.yml",
"ansible/lvm.yml",
],
limit="overcloud",
),
mock.call(
mock.ANY,
["ansible/kolla-ansible.yml"],
tags="config",
),
mock.call(
mock.ANY,
[
"ansible/kolla-target-venv.yml",
"ansible/kolla-host.yml",
"ansible/docker.yml",
"ansible/ceph-block-devices.yml",
],
limit="overcloud",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={"ansible_user": "stack"},
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_kayobe_venv(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_kolla_venv(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/usr/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_config_dump")
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
@mock.patch.object(commands.KollaAnsibleMixin,
"run_kolla_ansible_overcloud")
def test_overcloud_host_configure_both_venvs(self, mock_kolla_run,
mock_run, mock_dump):
command = commands.OvercloudHostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
mock_dump.return_value = {
"controller0": {
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"kayobe_ansible_user": "stack",
"kolla_ansible_target_venv": "/kolla/venv/bin/python",
}
}
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
"bootstrap-servers",
extra_vars={
"ansible_python_interpreter": "/kayobe/venv/bin/python",
"ansible_user": "stack",
"virtualenv": "/kolla/venv/bin/python",
}
),
]
self.assertEqual(expected_calls, mock_kolla_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_host_upgrade(self, mock_run):
command = commands.OvercloudHostUpgrade(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/kayobe-target-venv.yml",
"ansible/kolla-target-venv.yml",
"ansible/overcloud-docker-sdk-upgrade.yml",
"ansible/overcloud-etc-hosts-fixup.yml",
],
limit="overcloud",
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_container_image_build(self, mock_run):
command = commands.OvercloudContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_sets": (
"{{ overcloud_container_image_sets }}"),
"push_images": False,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_container_image_build_with_regex(self, mock_run):
command = commands.OvercloudContainerImageBuild(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/container-image-builders-check.yml",
"ansible/kolla-build.yml",
"ansible/container-image-build.yml"
],
extra_vars={
"container_image_regexes": "'^regex1$ ^regex2$'",
"push_images": True,
}
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_overcloud_post_configure(self, mock_run):
command = commands.OvercloudPostConfigure(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
'ansible/overcloud-ipa-images.yml',
'ansible/overcloud-introspection-rules.yml',
'ansible/overcloud-introspection-rules-dell-lldp-workaround.yml', # noqa
'ansible/provision-net.yml',
'ansible/overcloud-grafana-configure.yml'
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_inspect(self, mock_run):
command = commands.BaremetalComputeInspect(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-inspect.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_manage(self, mock_run):
command = commands.BaremetalComputeManage(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-manage.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
@mock.patch.object(commands.KayobeAnsibleMixin,
"run_kayobe_playbooks")
def test_baremetal_compute_provide(self, mock_run):
command = commands.BaremetalComputeProvide(TestApp(), [])
parser = command.get_parser("test")
parsed_args = parser.parse_args([])
result = command.run(parsed_args)
self.assertEqual(0, result)
expected_calls = [
mock.call(
mock.ANY,
[
"ansible/baremetal-compute-provide.yml",
],
),
]
self.assertEqual(expected_calls, mock_run.call_args_list)
| 2.125 | 2 |
keras2onnx/proto/tfcompat.py | CNugteren/keras-onnx | 1 | 4390 | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import tensorflow as _tf
from distutils.version import StrictVersion
is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0')
def normalize_tensor_shape(tensor_shape):
if is_tf2:
return [d for d in tensor_shape]
else:
return [d.value for d in tensor_shape]
def dump_graph_into_tensorboard(tf_graph):
# type: (_tf.Graph) -> None
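    # The graph is written out for TensorBoard only when the TB_LOG_DIR
    # environment variable is set, e.g. (hypothetical path):
    #   TB_LOG_DIR=/tmp/tb_logs python your_script.py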
_tb_log_dir = os.environ.get('TB_LOG_DIR')
if _tb_log_dir:
if is_tf2:
from tensorflow.python.ops.summary_ops_v2 import graph as write_graph
pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir)
with pb_visual_writer.as_default():
write_graph(tf_graph)
else:
from tensorflow.python.summary import summary
pb_visual_writer = summary.FileWriter(_tb_log_dir)
pb_visual_writer.add_graph(tf_graph)
if is_tf2:
tensorflow = _tf.compat.v1
def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
else:
tensorflow = _tf
def is_subclassed(layer):
return False
| 2.203125 | 2 |
back2back/httpmulticlient.py | excentis/ByteBlower_python_examples | 2 | 4391 |
"""
HTTP MultiServer/MultiClient for the ByteBlower Python API.
All examples are guaranteed to work with Python 2.7 and above
Copyright 2018, Ex<NAME>.
"""
# Needed for python2 / python3 print function compatibility
from __future__ import print_function
# import the ByteBlower module
import byteblowerll.byteblower as byteblower
import time
configuration = {
# Address (IP or FQDN) of the ByteBlower server to use
'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',
# Configuration for the first ByteBlower port.
# Will be used as HTTP server.
'port_1_config': {
'interface': 'trunk-1-13',
'mac': '00:bb:01:00:00:01',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# 'ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
# TCP port number to be used by the HTTP connection.
# On the HTTP server, this will be the port on which the server
# listens.
'tcp_port': 4096
},
# Configuration for the second ByteBlower port.
# Will be used as HTTP client.
'port_2_config': {
'interface': 'trunk-1-25',
'mac': '00:bb:01:00:00:02',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
# TCP port range the HTTP Clients will use to connect with
# the HTTP server
'tcp_port_min': 32000,
'tcp_port_max': 50000
},
# HTTP Method
# HTTP Method can be GET or PUT
# - GET: Standard HTTP download, we retrieve data from the web server
# - PUT: Standard HTTP upload, the wireless endpoint will push data to the
# webserver
'http_method': 'GET',
# 'http_method': 'PUT',
# total duration, in nanoseconds.
# This is the duration of the flow. When this duration expires,
# all sessions will be stopped.
'duration': 10000000000,
# session duration, in nanoseconds
# Duration of the individual sessions
# 'session_duration': 1500000000,
'session_duration': None,
# session size, in bytes
# The number of bytes transmitted by a session
'session_size': 1 * 1000 * 1000,
# 'session_size': None,
# max concurrent sessions
# Maximum number of sessions that will be running simultaneously
'max_concurrent_sessions': 100,
# maximum number of sessions
# No more than this number of sessions will be created
# 0 means no limit
'max_total_sessions': 0,
# TOS value to use on the HTTP client (and server)
'tos': 0
}
class Example:
def __init__(self, **kwargs):
self.server_address = kwargs['server_address']
self.port_1_config = kwargs['port_1_config']
self.port_2_config = kwargs['port_2_config']
# Helper function, we can use this to parse the HTTP Method to the
# enumeration used by the API
from byteblowerll.byteblower import ParseHTTPRequestMethodFromString
http_method_arg = kwargs['http_method']
self.http_method = ParseHTTPRequestMethodFromString(http_method_arg)
self.duration = kwargs['duration']
self.session_duration = kwargs['session_duration']
self.session_size = kwargs['session_size']
self.max_concurrent_sessions = kwargs['max_concurrent_sessions']
self.max_total_sessions = kwargs['max_total_sessions']
self.tos = kwargs['tos']
self.server = None
self.port_1 = None
self.port_2 = None
def cleanup(self):
"""Clean up the created objects"""
byteblower_instance = byteblower.ByteBlower.InstanceGet()
if self.port_1:
self.server.PortDestroy(self.port_1)
self.port_1 = None
if self.port_2:
self.server.PortDestroy(self.port_2)
self.port_2 = None
if self.server is not None:
byteblower_instance.ServerRemove(self.server)
self.server = None
def run(self):
byteblower_instance = byteblower.ByteBlower.InstanceGet()
print("Connecting to ByteBlower server %s..." % self.server_address)
self.server = byteblower_instance.ServerAdd(self.server_address)
# Create the port which will be the HTTP server (port_1)
print("Creating HTTP Server port")
self.port_1 = self.provision_port(self.port_1_config)
print("Creating HTTP Client port")
# Create the port which will be the HTTP client (port_2)
self.port_2 = self.provision_port(self.port_2_config)
http_server_ip_address = self.port_1_config['ip_address']
# create a HTTP server
http_server = self.port_1.ProtocolHttpMultiServerAdd()
server_tcp_port = self.port_1_config['tcp_port']
if server_tcp_port is not None:
http_server.PortSet(server_tcp_port)
else:
server_tcp_port = http_server.PortGet()
# create a HTTP Client
http_client = self.port_2.ProtocolHttpMultiClientAdd()
# - remote endpoint
http_client.RemoteAddressSet(http_server_ip_address)
http_client.RemotePortSet(server_tcp_port)
# - local endpoint
http_client.LocalPortRangeSet(self.port_2_config['tcp_port_min'],
self.port_2_config['tcp_port_max'])
# Configure the direction.
# If the HTTP Method is GET,
# traffic will flow from the HTTP server to the HTTP client
# If the HTTP Method is PUT,
# traffic will flow from the HTTP client to the HTTP server
http_client.HttpMethodSet(self.http_method)
print("Server port:", self.port_1.DescriptionGet())
print("Client port:", self.port_2.DescriptionGet())
# let the HTTP server listen for requests
http_server.Start()
# - total duration of all sessions
http_client.DurationSet(self.duration)
# - how many connections can be created?
http_client.CumulativeConnectionLimitSet(self.max_total_sessions)
# - how many connections can be running at the same time
http_client.MaximumConcurrentRequestsSet(self.max_concurrent_sessions)
# - individual duration, can be size-based or time-based
if self.session_duration is not None:
# let the HTTP Client request a page of a specific duration
# to download...
http_client.SessionDurationSet(self.session_duration)
elif self.session_size is not None:
# let the HTTP Client request a page of a specific size...
http_client.SessionSizeSet(self.session_size)
else:
raise ValueError("Either duration or request_size must be configured")
print("Starting the HTTP client")
http_client.Start()
http_client_result = http_client.ResultGet()
for iteration in range(10):
time.sleep(1)
http_client_result.Refresh()
print("-" * 10)
print("Iteration", iteration+1)
print(" connections attempted", http_client_result.ConnectionsAttemptedGet())
print(" connections established", http_client_result.ConnectionsEstablishedGet())
print(" connections aborted", http_client_result.ConnectionsAbortedGet())
print(" connections refused", http_client_result.ConnectionsRefusedGet())
print("-" * 10)
http_client.Stop()
http_server.Stop()
print("Stopped the HTTP client")
request_status_value = http_client.StatusGet()
request_status_string = byteblower.ConvertHTTPMultiClientStatusToString(request_status_value)
http_client_result.Refresh()
tx_bytes = http_client_result.TcpTxByteCountGet()
tx_speed = http_client_result.TcpTxSpeedGet()
rx_bytes = http_client_result.TcpRxByteCountGet()
rx_speed = http_client_result.TcpRxSpeedGet()
http_server_result = http_server.ResultGet()
http_server_result.Refresh()
print("Requested Duration : {} nanoseconds".format(self.duration))
print("Status : {}".format(request_status_string))
print("Client Result data : {}".format(http_client_result.DescriptionGet()))
print("Server Result data : {}".format(http_server_result.DescriptionGet()))
return [
self.duration,
self.session_duration,
self.session_size,
self.max_total_sessions,
self.max_concurrent_sessions,
tx_bytes, rx_bytes,
tx_speed, rx_speed,
request_status_value
]
def provision_port(self, config):
port = self.server.PortCreate(config['interface'])
port_l2 = port.Layer2EthIISet()
port_l2.MacSet(config['mac'])
ip_config = config['ip']
if not isinstance(ip_config, list):
# Config is not static, DHCP or slaac
if ip_config.lower() == "dhcpv4":
port_l3 = port.Layer3IPv4Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpGet()
elif ip_config.lower() == "dhcpv6":
port_l3 = port.Layer3IPv6Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpDhcpGet()
elif ip_config.lower() == "slaac":
port_l3 = port.Layer3IPv6Set()
port_l3.StatelessAutoconfiguration()
config['ip_address'] = port_l3.IpStatelessGet()
else:
# Static configuration
if len(ip_config) == 3:
# IPv4
port_l3 = port.Layer3IPv4Set()
port_l3.IpSet(ip_config[0])
port_l3.NetmaskSet(ip_config[1])
port_l3.GatewaySet(ip_config[2])
config['ip_address'] = port_l3.IpGet()
elif len(ip_config) == 2:
port_l3 = port.Layer3IPv6Set()
# IPv6
address = ip_config[0]
prefix_length = ip_config[1]
ip = "{}/{}".format(address, prefix_length)
port_l3.IpManualAdd(ip)
config['ip_address'] = ip_config[0]
if not isinstance(config['ip_address'], str):
ip = config['ip_address'][0]
if '/' in ip:
config['ip_address'] = ip.split('/')[0]
print("Created port", port.DescriptionGet())
return port
# When this python module is called stand-alone, the run-function must be
# called. This approach makes it possible to include it in a series of
# examples.
if __name__ == "__main__":
example = Example(**configuration)
try:
example.run()
finally:
example.cleanup()
| 2.6875 | 3 |
tools/pod-xml-to-geojson.py | 24-timmarsseglingarna/app | 0 | 4392 | #!/usr/bin/env python
# Converts a PoD XML file to a GeoJSON file.
#
# With the --javascript parameter, the generated file is a javascript
# file defining a variable 'basePodSpec'.
#
# Get the PoD XML file from http://dev.24-timmars.nu/PoD/xmlapi_app.php.
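# Example invocation (file names and terrain id are hypothetical):
#   ./pod-xml-to-geojson.py --id 21 -i pod.xml -o pod.js --javascript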
import xml.etree.ElementTree as etree
import argparse
import re
import json
import io
import sys
import os.path
import datetime
if sys.version < '3':
import codecs
# points numbered 9000 and above are not real points; they are used to mark
# area borders
MAXPOINT=8999
def run():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="input file")
parser.add_argument("-o", "--outfile", help="output file")
parser.add_argument("--id", help="id of terrain")
parser.add_argument("--javascript", action="store_true")
args = parser.parse_args()
tree = etree.parse(args.infile)
all_points, start_points, turning_points = get_points(tree)
inshore_legs, offshore_legs = get_legs(tree, all_points)
output_pod(args.outfile, args.javascript, args.id,
[('startPoints', start_points),
('turningPoints', turning_points),
('inshoreLegs', inshore_legs),
('offshoreLegs', offshore_legs)])
def output_pod(fname, javascript, id, features):
if sys.version < '3':
fd = codecs.open(fname, "w", encoding="utf-8")
else:
fd = io.open(fname, "w", encoding="utf-8")
if javascript:
fd.write(u'/* eslint-disable */\n')
fd.write(u'export var basePodSpec = ')
fd.write(u'{"id": %s, ' % id)
flen = len(features)
i = 1
for (name, obj) in features:
fd.write(u'"%s": {"type": "FeatureCollection",'
'"crs": { "type": "name",'
'"properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },'
'"features":' % name)
fd.write(json.dumps(obj, ensure_ascii=False))
if i == flen:
fd.write(u'}')
else:
i = i + 1
fd.write(u'},\n')
if javascript:
fd.write(u'};\n')
else:
fd.write(u'}\n')
def get_points(tree):
doc = tree.getroot()
startnumbers = {}
all_points = {}
start_points = []
turning_points = []
for n in doc.findall("kretsar/krets/startpoints/number"):
startnumbers[n.text] = True
for p in doc.findall("points/point"):
number = p.find("number").text
if int(number) > MAXPOINT:
continue
name = p.find("name").text
descr = p.find("descr").text
lat = p.find("lat").text
lng = p.find("long").text
footnote = None
footnoteelem = p.find("footnote")
if footnoteelem is not None:
footnote = footnoteelem.text
properties = {"number": number,
"name": name,
"descr": descr}
        if footnote is not None:
properties["footnote"] = footnote
coordinates = [float(lng), float(lat)]
geometry = {"type": "Point",
"coordinates": coordinates}
point = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if number in startnumbers:
start_points.extend(point)
else:
turning_points.extend(point)
all_points[number] = coordinates
return all_points, start_points, turning_points
def get_legs(tree, all_points):
doc = tree.getroot()
coast = []
offshore = []
for p in doc.findall("legs/leg"):
src = p.find("from").text
dst = p.find("to").text
if int(src) > MAXPOINT or int(dst) > MAXPOINT:
continue
if int(src) < int(dst):
# since all legs are present twice (in both directions),
# skip one direction
continue
dist = p.find("dist").text
sea = p.find("sea").text
addtime = p.find("addtime").text
if dist is None:
print("** error: no distance: src: %s dst: %s" % (src, dst))
properties = {"src": src,
"dst": dst,
"dist": float(dist)}
if properties["dist"] == 0 and addtime == "1":
properties["addtime"] = True;
src_coords = all_points[src]
dst_coords = all_points[dst]
geometry = {"type": "LineString",
"coordinates": [src_coords, dst_coords]}
leg = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if sea == "0":
coast.extend(leg)
else:
offshore.extend(leg)
return coast, offshore
if __name__ == '__main__':
run()
| 2.5625 | 3 |
rastervision/plugin.py | carderne/raster-vision | 3 | 4393 | import os
import json
import importlib
from pluginbase import PluginBase
import rastervision as rv
from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg
from rastervision.utils.files import download_if_needed
class PluginError(Exception):
pass
def load_conf_list(s):
"""Loads a list of items from the config.
Lists should be comma separated.
This takes into account that previous versions of Raster Vision
allowed for a `[ "module" ]` like syntax, even though that didn't
work for multi-value lists.
"""
try:
# A comma separated list of values will be transformed to
# having a list-like string, with ' instead of ". Replacing
# single quotes with double quotes lets us parse it as a JSON list.
return json.loads(s.replace("'", '"'))
except json.JSONDecodeError:
return list(map(lambda x: x.strip(), s.split(',')))
class PluginRegistry:
@staticmethod
def get_instance():
return rv._registry._get_plugin_registry()
def __init__(self, plugin_config, rv_home):
"""Initializes this plugin registry.
A plugin registry is passed to plugins in a call
to their "register_plugin" method.
Args:
plugin_config - the everett ConfigManager for the plugin
section of the application configuration.
"""
self.plugin_root_dir = os.path.join(rv_home, 'plugins')
self.config_builders = {}
self.command_config_builders = {}
self.commands = []
self.aux_command_classes = {}
self.default_raster_sources = []
self.default_vector_sources = []
self.default_label_sources = []
self.default_label_stores = []
self.default_evaluators = []
self.experiment_runners = {}
self.filesystems = []
plugin_files = load_conf_list(plugin_config('files', default='[]'))
self._load_from_files(plugin_files)
self.plugin_files = plugin_files
plugin_modules = load_conf_list(plugin_config('modules', default='[]'))
self._load_from_modules(plugin_modules)
self.plugin_modules = plugin_modules
def _load_plugin(self, plugin, identifier):
# Check the plugin is valid
if not hasattr(plugin, 'register_plugin'):
raise PluginError('Plugin at {} does not have '
'"register_plugin" method.'.format(identifier))
register_method = getattr(plugin, 'register_plugin')
if not callable(register_method):
raise PluginError('Plugin at {} has a '
'"register_plugin" attribute, '
'but it is not callable'.format(identifier))
# TODO: Log loading plugin.
register_method(self)
def _load_from_files(self, plugin_paths):
if not plugin_paths:
return
self.plugin_sources = []
plugin_base = PluginBase(package='rastervision.plugins')
for uri in plugin_paths:
plugin_name = os.path.splitext(os.path.basename(uri))[0]
plugin_path = os.path.join(self.plugin_root_dir, plugin_name)
fs = rv._registry.get_file_system(uri, search_plugins=False)
local_path = download_if_needed(uri, plugin_path, fs=fs)
local_dir = os.path.dirname(local_path)
plugin_source = plugin_base.make_plugin_source(
searchpath=[local_dir])
# We're required to hang onto the source
# to keep it from getting GC'd.
self.plugin_sources.append(plugin_source)
self._load_plugin(plugin_source.load_plugin(plugin_name), uri)
def _load_from_modules(self, plugin_modules):
if not plugin_modules:
return
for module in plugin_modules:
plugin = importlib.import_module(module)
self._load_plugin(plugin, module)
def add_plugins_from_proto(self, plugin_msg):
new_plugin_files = list(
set(plugin_msg.plugin_uris) - set(self.plugin_files))
self._load_from_files(new_plugin_files)
self.plugin_files.extend(new_plugin_files)
new_plugin_modules = list(
set(plugin_msg.plugin_modules) - set(self.plugin_modules))
self._load_from_modules(new_plugin_modules)
self.plugin_modules.extend(new_plugin_modules)
def to_proto(self):
"""Returns a protobuf message that records the
plugin sources for plugins that are currently loaded
in the registry.
"""
return PluginConfigMsg(
plugin_uris=self.plugin_files, plugin_modules=self.plugin_modules)
def register_config_builder(self, group, key, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
group - The Config group, e.g. rv.BACKEND, rv.TASK.
key - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of ConfigBuilder that builds
the Config for this plugin.
"""
if (group, key) in self.config_builders:
raise PluginError('ConfigBuilder already registered for group '
'{} and key {}'.format(group, key))
self.config_builders[(group, key)] = builder_class
def register_command_config_builder(self, command_type, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of CommandConfigBuilder that builds
the CommandConfig for this plugin.
"""
if command_type in self.command_config_builders:
raise PluginError(
                'CommandConfigBuilder already registered for command '
'with type {}'.format(command_type))
self.command_config_builders[command_type] = builder_class
self.commands.append(command_type)
def register_aux_command(self, command_type, command_class):
"""Registers a custom AuxCommand as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
command_class - The subclass of AuxCommand subclass to register.
"""
if command_type in self.command_config_builders:
raise PluginError(
                'CommandConfigBuilder is already registered for command '
'with type {}'.format(command_type))
if command_type in self.aux_command_classes:
            raise PluginError('AuxCommand is already registered for command '
'with type {}'.format(command_type))
self.aux_command_classes[command_type] = command_class
if command_class.options.include_by_default:
self.commands.append(command_type)
def register_default_raster_source(self, provider_class):
"""Registers a RasterSourceDefaultProvider for use as a plugin."""
self.default_raster_sources.append(provider_class)
def register_default_vector_source(self, provider_class):
"""Registers a VectorSourceDefaultProvider for use as a plugin."""
self.default_vector_sources.append(provider_class)
def register_default_label_source(self, provider_class):
"""Registers a LabelSourceDefaultProvider for use as a plugin."""
self.default_label_sources.append(provider_class)
def register_default_label_store(self, provider_class):
"""Registers a LabelStoreDefaultProvider for use as a plugin."""
self.default_label_stores.append(provider_class)
def register_default_evaluator(self, provider_class):
"""Registers an EvaluatorDefaultProvider for use as a plugin."""
self.default_evaluators.append(provider_class)
def register_experiment_runner(self, runner_key, runner_class):
"""Registers an ExperimentRunner as a plugin.
Args:
runner_key - The key used to reference this plugin runner.
This is a string that will match the command line
argument used to reference this runner; e.g. if the
key is "FOO_RUNNER", then users can use the runner
by issuing a "rastervision run foo_runner ..." command.
runner_class - The class of the ExperimentRunner plugin.
"""
if runner_key in self.experiment_runners:
raise PluginError('ExperimentRunner already registered for '
'key {}'.format(runner_key))
self.experiment_runners[runner_key] = runner_class
def register_filesystem(self, filesystem_class):
"""Registers a FileSystem as a plugin."""
self.filesystems.append(filesystem_class)
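# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# A plugin is a module (or downloaded file) that exposes a module-level
# register_plugin(plugin_registry) hook; _load_plugin calls it with the
# registry so the plugin can register its components. The builder class and
# key below are hypothetical.
#
#     def register_plugin(plugin_registry):
#         plugin_registry.register_config_builder(
#             rv.BACKEND, 'MY_BACKEND', MyBackendConfigBuilder)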
| 2.234375 | 2 |
acsm/nnutils/resunet.py | eldar/acsm | 52 | 4394 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import functools
from . import net_blocks as nb
import pdb
class ResNetConcatGenerator(nn.Module):
def __init__(self, input_nc, output_nc, n_blocks=3, ngf=64,):
super(ResNetConcatGenerator, self).__init__()
self.encoder = ResnetEncoder(n_blocks=n_blocks)
self.n_blocks = n_blocks
decoder = []
if n_blocks == 3:
inner_nc = 256
nlayers = 4
        elif n_blocks == 4:
            inner_nc = 512
            nlayers = 5
        else:
            raise ValueError('unsupported n_blocks: {}'.format(n_blocks))
for lx in range(nlayers):
outnc = max(inner_nc // 2, 16)
up = nb.upconv2d(inner_nc, outnc)
decoder.append(up)
inner_nc = outnc
up = nn.Conv2d(
inner_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=True)
decoder.append(up)
self.decoder = nn.Sequential(*decoder)
nb.net_init(self.decoder)
return
def forward(self, input):
img_enc = self.encoder(input)
img_dec = self.decoder(img_enc)
return img_dec
def reinit_weights(self, ):
self.encoder = ResnetEncoder(n_blocks=self.n_blocks)
nb.net_init(self.decoder)
class ResnetEncoder(nn.Module):
def __init__(self, n_blocks):
super(ResnetEncoder, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.n_blocks = n_blocks
def forward(self, x):
n_blocks = self.n_blocks
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
if n_blocks >= 1:
x = self.resnet.layer1(x)
if n_blocks >= 2:
x = self.resnet.layer2(x)
if n_blocks >= 3:
x = self.resnet.layer3(x)
if n_blocks >= 4:
x = self.resnet.layer4(x)
return x
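# --- Hedged smoke test (editor's addition, not part of the original file) ---
# Assuming each nb.upconv2d stage upsamples by 2x, n_blocks=3 downsamples by
# 16x in the encoder (conv1 + maxpool: 4x, layer2: 2x, layer3: 2x) and the
# four upconv stages restore the input resolution.
if __name__ == '__main__':
    net = ResNetConcatGenerator(input_nc=3, output_nc=2, n_blocks=3)
    with torch.no_grad():
        out = net(torch.rand(1, 3, 224, 224))
    print(out.shape)  # expected: (1, 2, 224, 224) under the assumption above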
| 1.96875 | 2 |
uproot_methods/common/TVector.py | marinang/uproot-methods | 0 | 4395 | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numbers
import operator
import awkward
import awkward.util
class Common(object):
@property
def mag2(self):
return self.dot(self)
@property
def mag(self):
return awkward.util.numpy.sqrt(self.mag2)
@property
def rho2(self):
out = self.x*self.x
out = out + self.y*self.y
return out
def delta_phi(self, other):
return (self.phi - other.phi + math.pi) % (2*math.pi) - math.pi
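    # Hedged note (editor's addition): the modulo above wraps the difference
    # into [-pi, pi); e.g. self.phi = 3.0 and other.phi = -3.0 gives roughly
    # -0.283 instead of the raw difference 6.0.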
def isparallel(self, other, tolerance=1e-10):
return 1 - self.cosdelta(other) < tolerance
def isantiparallel(self, other, tolerance=1e-10):
return self.cosdelta(other) - (-1) < tolerance
def iscollinear(self, other, tolerance=1e-10):
return 1 - awkward.util.numpy.absolute(self.cosdelta(other)) < tolerance
def __lt__(self, other):
raise TypeError("spatial vectors have no natural ordering")
def __gt__(self, other):
raise TypeError("spatial vectors have no natural ordering")
def __le__(self, other):
raise TypeError("spatial vectors have no natural ordering")
def __ge__(self, other):
raise TypeError("spatial vectors have no natural ordering")
class ArrayMethods(Common):
@property
def unit(self):
return self / self.mag
@property
def rho(self):
out = self.rho2
return awkward.util.numpy.sqrt(out)
@property
def phi(self):
return awkward.util.numpy.arctan2(self.y, self.x)
def cosdelta(self, other):
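        # Editor's note: entries where either vector has zero magnitude are
        # masked out below and assigned cosdelta = 1, avoiding a division by
        # zero (matching the scalar Methods.cosdelta behaviour).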
denom = self.mag2 * other.mag2
mask = (denom > 0)
denom = denom[mask]
denom[:] = awkward.util.numpy.sqrt(denom)
out = self.dot(other)
out[mask] /= denom
mask = awkward.util.numpy.logical_not(mask)
out[mask] = 1
return awkward.util.numpy.clip(out, -1, 1)
def angle(self, other, normal=None, degrees=False):
out = awkward.util.numpy.arccos(self.cosdelta(other))
if normal is not None:
a = self.unit
b = other.unit
out = out * awkward.util.numpy.sign(normal.dot(a.cross(b)))
if degrees:
out = awkward.util.numpy.multiply(out, 180.0/awkward.util.numpy.pi)
return out
def isopposite(self, other, tolerance=1e-10):
tmp = self + other
tmp.x = awkward.util.numpy.absolute(tmp.x)
tmp.y = awkward.util.numpy.absolute(tmp.y)
tmp.z = awkward.util.numpy.absolute(tmp.z)
out = (tmp.x < tolerance)
out = awkward.util.numpy.bitwise_and(out, tmp.y < tolerance)
out = awkward.util.numpy.bitwise_and(out, tmp.z < tolerance)
return out
    def isperpendicular(self, other, tolerance=1e-10):
        # dot() yields scalars, so compare their absolute value directly
        # instead of accessing x/y/z components that a scalar does not have.
        return awkward.util.numpy.absolute(self.dot(other)) < tolerance
class Methods(Common):
@property
def unit(self):
return self / self.mag
@property
def rho(self):
return math.sqrt(self.rho2)
@property
def phi(self):
return math.atan2(self.y, self.x)
def cosdelta(self, other):
m1 = self.mag2
m2 = other.mag2
if m1 == 0 or m2 == 0:
return 1.0
r = self.dot(other) / math.sqrt(m1 * m2)
return max(-1.0, min(1.0, r))
def angle(self, other, degrees=False):
out = math.acos(self.cosdelta(other))
if degrees:
out = out * 180.0/math.pi
return out
def isopposite(self, other, tolerance=1e-10):
tmp = self + other
return abs(tmp.x) < tolerance and abs(tmp.y) < tolerance and abs(tmp.z) < tolerance
    def isperpendicular(self, other, tolerance=1e-10):
        # dot() yields a scalar, so compare its absolute value directly.
        return abs(self.dot(other)) < tolerance
def __add__(self, other):
return self._vector(operator.add, other)
def __radd__(self, other):
return self._vector(operator.add, other, True)
def __sub__(self, other):
return self._vector(operator.sub, other)
def __rsub__(self, other):
return self._vector(operator.sub, other, True)
def __mul__(self, other):
return self._scalar(operator.mul, other)
def __rmul__(self, other):
return self._scalar(operator.mul, other, True)
def __div__(self, other):
return self._scalar(operator.div, other)
def __rdiv__(self, other):
return self._scalar(operator.div, other, True)
def __truediv__(self, other):
return self._scalar(operator.truediv, other)
def __rtruediv__(self, other):
return self._scalar(operator.truediv, other, True)
def __floordiv__(self, other):
return self._scalar(operator.floordiv, other)
def __rfloordiv__(self, other):
return self._scalar(operator.floordiv, other, True)
def __mod__(self, other):
return self._scalar(operator.mod, other)
def __rmod__(self, other):
return self._scalar(operator.mod, other, True)
def __divmod__(self, other):
return self._scalar(operator.divmod, other)
def __rdivmod__(self, other):
return self._scalar(operator.divmod, other, True)
def __pow__(self, other):
if isinstance(other, (numbers.Number, awkward.util.numpy.number)):
if other == 2:
return self.mag2
else:
return self.mag2**(0.5*other)
else:
            return self._scalar(operator.pow, other)
# no __rpow__
def __lshift__(self, other):
return self._scalar(operator.lshift, other)
def __rlshift__(self, other):
return self._scalar(operator.lshift, other, True)
def __rshift__(self, other):
return self._scalar(operator.rshift, other)
def __rrshift__(self, other):
return self._scalar(operator.rshift, other, True)
def __and__(self, other):
return self._scalar(operator.and_, other)
def __rand__(self, other):
return self._scalar(operator.and_, other, True)
def __or__(self, other):
return self._scalar(operator.or_, other)
def __ror__(self, other):
return self._scalar(operator.or_, other, True)
def __xor__(self, other):
return self._scalar(operator.xor, other)
def __rxor__(self, other):
return self._scalar(operator.xor, other, True)
def __neg__(self):
return self._unary(operator.neg)
def __pos__(self):
return self._unary(operator.pos)
def __abs__(self):
return self.mag
def __invert__(self):
return self._unary(operator.invert)
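# Editor's note (hedged): Common, ArrayMethods and Methods are mixins; the
# concrete vector classes that inherit them are expected to supply x, y, z,
# dot, cross and the _vector/_scalar/_unary helpers used by the operators
# above.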
| 1.515625 | 2 |
tpp/controller/ConversionController.py | pennyarcade/TPPP | 0 | 4396 | """
Implements a non-interactive controller to control non-interactive visualizers.
(i.e. those that are used for converting TPP source code into another format)
"""
from tpp.FileParser import FileParser
from tpp.controller.TPPController import TPPController
class ConversionController(TPPController):
"""
    Implements a non-interactive controller to run non-interactive visualizers.
    (i.e. those that are used for converting TPP source code into another format)
"""
def __init__(self, input_file, output, visualizer_class):
"""
Todo: ApiDoc.
:rtype:
        :param input_file:
:param output:
:param visualizer_class:
"""
super(ConversionController, self).__init__()
parser = FileParser(input_file)
self.pages = parser.get_pages()
self.vis = visualizer_class(output)
def run(self):
"""
Todo: ApiDoc.
:return:
"""
for page in self.pages:
while True:
eop = page.is_eop()
self.vis.visualize(page.next_line(), eop)
if eop:
break
def close(self):
"""
Todo: ApiDoc.
:return:
"""
self.vis.close()
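# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# visualizer_class is any class constructed with the output target and
# offering visualize(line, eop) and close(); the TextVisualizer name and file
# paths below are hypothetical.
#
#     controller = ConversionController('talk.tpp', 'talk.txt', TextVisualizer)
#     controller.run()
#     controller.close()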
| 3.25 | 3 |
scrapers/covid_scraper.py | ZachGeo/covidGR_API | 0 | 4397 | from bs4 import BeautifulSoup
from datetime import date
from lxml import html
import requests
import re
import json
class CovidScraper:
def __init__(self):
self.api_url = 'http://127.0.0.1:5000/covidgr'
self.api_sum_url = 'http://127.0.0.1:5000/summary/covidgr'
self.api_test_url = 'http://127.0.0.1:5000/covidgr/tests'
self.scrape_url = 'https://www.worldometers.info/coronavirus/country/greece/'
self.scrape_tests_url = 'https://github.com/owid/covid-19-data/blob/master/public/data/testing/covid-testing-latest-data-source-details.csv'
self.today = ''
self.covid_data = []
        self.summary_data = []
def scrape_data(self):
data = []
self.today = str(date.today())
soup = self.scrape_page_content()
soup_test_page = self.scrape_page_content_contains_tests()
if soup:
self.get_daily_data(soup)
self.get_summary_data(soup)
if self.summary_data and self.covid_data:
post_daily_and_sum_covid_data = self.call_api_put_data(
self.today, self.covid_data, self.summary_data)
data.append(post_daily_and_sum_covid_data)
if soup_test_page:
tests_data = self.get_tests_per_day(soup_test_page)
if tests_data[0]:
post_daily_tests_covid_data = self.call_api_post_tested_covid_data(
tests_data[0], tests_data[1])
data.append(post_daily_tests_covid_data)
return data
def scrape_page_content(self):
page = requests.get(self.scrape_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def scrape_page_content_contains_tests(self):
page = requests.get(self.scrape_tests_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def get_daily_data(self, soup):
covid_data = []
daily_covidgr_html_content = soup.find('li', class_='news_li')
get_daily_covidgr_text = daily_covidgr_html_content.text
for elem in get_daily_covidgr_text.split():
            regex = r'\d*(.|)\d+'
match = re.findall(regex, elem)
if match:
covid_data.append(elem)
self.covid_data = covid_data
def get_summary_data(self, soup):
summary_data = []
all_cases_covidgr_html_content = soup.find_all(
'div', class_='maincounter-number')
for item in range(len(all_cases_covidgr_html_content)):
regex = r'(\n)|\s'
all_cases_data = re.sub(
regex, '', all_cases_covidgr_html_content[item].text)
summary_data.append(all_cases_data)
self.summary_data = summary_data
    def get_tests_per_day(self, tree):
        html_content = tree.find('tr', id='LC34').find_all('td')
        country_code = html_content[1]
        date_test = html_content[3].text
        if country_code.text == 'GRC':
            today_tests = html_content[10].text
            return [date_test, today_tests]
        # Return an empty result so the caller's tests_data[0] check still
        # works when the row does not belong to Greece.
        return [None, None]
def call_api_post_tested_covid_data(self, today, tests):
headers = {
'Content-type': 'application/json',
}
data = json.dumps({"date": today, "daily_test": tests})
response_tests = requests.post(
self.api_test_url, headers=headers, data=data)
return response_tests.json()
def call_api_put_data(self, today, covid_data, summary_data):
headers = {
'Content-type': 'application/json',
}
data = json.dumps(
{"date": today, "cases": covid_data[0], "deaths": covid_data[1]})
sum_data = json.dumps(
{"sum_cases": summary_data[0], "sum_deaths": summary_data[1], "sum_recovered": summary_data[2]})
response = requests.post(self.api_url, headers=headers, data=data)
response_sum = requests.put(
self.api_sum_url, headers=headers, data=sum_data)
return [response.json(), response_sum.json()]
if __name__ == '__main__':
cs = CovidScraper()
results = cs.scrape_data()
print(results)
| 3.078125 | 3 |
img/autoeditimg.py | schorsche/css3-imageslider | 0 | 4398 | #!/usr/bin/python2.7
import os
from PIL import Image
DATEI_WEB_GROSSE = 700
def isimg(isitimg):
ext = os.path.splitext(isitimg)[1].lower()
if ext == ".jpg" or ext == ".png" or ext == ".gif":
return True
return False
def bearbeiten(datei):
img = Image.open(datei)
wrel = DATEI_WEB_GROSSE / float(img.size[0])
habs = int( float(img.size[1]) * float(wrel) )
splt = os.path.splitext(datei)
newfilename = splt[0] + splt[1].lower()
img = img.resize((DATEI_WEB_GROSSE, habs), Image.ANTIALIAS)
img.save(newfilename, quality=100, optimize=True, progressive=True)
if newfilename != datei:
os.rename(newfilename, datei)
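# Hedged worked example (editor's addition): for a 1400x900 source image,
# wrel = 700 / 1400.0 = 0.5 and habs = int(900 * 0.5) = 450, so the image
# written back to disk is 700x450.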
def main():
files = os.listdir('.')
files = filter(isimg, files)
for f in files:
print f
bearbeiten(f)
if __name__ == '__main__':
main() | 2.78125 | 3 |
wow/wow.py | brisberg/Kiri-Cogs | 0 | 4399 | import discord
from discord.ext import commands
class WowCog:
"""Custom Cog that had commands for WoW Memes"""
def __init__(self, bot):
self.bot = bot
async def _play(self, url, ctx):
"""Helper for aliasing Play in the Audio module"""
audio = self.bot.get_cog('Audio')
if not audio:
await self.bot.say("Audio module required. Load with: {}load audio".format(ctx.prefix))
return
await ctx.invoke(audio.play, url_or_search_terms=url)
@commands.command(pass_context=True, no_pm=True)
async def flamewreath(self, ctx):
"""I will not move when Flame Wreath is cast!"""
await self._play("https://www.youtube.com/watch?v=gcA6y7sxKcA", ctx)
def setup(bot):
bot.add_cog(WowCog(bot))
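# Hedged usage note (editor's addition): this follows the Red-DiscordBot cog
# convention, so the cog would be loaded with "[p]load wow" and the clip
# played with "[p]flamewreath"; if the Audio cog is not loaded, the command
# only replies with the load hint above.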
| 2.8125 | 3 |