try:
from concurrent.futures import ProcessPoolExecutor
have_process_pool_executor = True # pragma: no cover
except ImportError:
have_process_pool_executor = False
class _SerialFuture(object):
def __init__(self, fn, args, kwargs):
self._fn = fn
self._args = args
self._kwargs = kwargs
def result(self):
return self._fn(*self._args, **self._kwargs)
def cancel(self):
pass
# It's important that we defer execution of the function until .result() is
# called, because zs.reader.ZS calls .submit() from a separate thread, and
# .result() from the main thread. We don't want to be shunting arbitrary
# calls off to the separate thread. Also, this ensures that exceptions are
# raised from result(), not submit().
class SerialExecutor(object):
def submit(self, fn, *args, **kwargs):
return _SerialFuture(fn, args, kwargs)
def shutdown(self):
pass # pragma: no cover
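# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): the deferral described in the comment above can be observed
# directly -- submit() only records the call, and result() runs it on the
# calling thread.
def _example_serial_future_defers_execution():
    calls = []
    future = SerialExecutor().submit(calls.append, "ran")
    assert calls == []       # nothing has executed yet
    future.result()          # the call happens here, in the caller's thread
    assert calls == ["ran"]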
if not have_process_pool_executor:
# then fake it!
import multiprocessing
class _MultiprocessingFuture(object):
def __init__(self, async_result):
self._async_result = async_result
def result(self):
return self._async_result.get()
def cancel(self):
# Can't be done!
pass
class ProcessPoolExecutor(object):
def __init__(self, num_workers):
self._pool = multiprocessing.Pool(num_workers)
def submit(self, fn, *args, **kwargs):
async_result = self._pool.apply_async(fn, args, kwargs)
return _MultiprocessingFuture(async_result)
def shutdown(self):
self._pool.terminate()
self._pool.join()
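# Usage sketch (illustrative only; `parallel`, `some_function` and `arg` are
# hypothetical, and the selection shown is not necessarily what zs.reader.ZS
# does): both executor types expose the same submit()/result()/shutdown()
# surface, so callers can swap them freely.
#
#   executor = ProcessPoolExecutor(4) if parallel else SerialExecutor()
#   future = executor.submit(some_function, arg)
#   value = future.result()
#   executor.shutdown()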
| {
"content_hash": "c37703bc51bc477ca915a05197ea86ce",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 30.19298245614035,
"alnum_prop": 0.6269610691458455,
"repo_name": "njsmith/zs",
"id": "874a6a5ef0dba03c752d410470d4213ef4e73c49",
"size": "2893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zs/futures.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "10097"
},
{
"name": "Python",
"bytes": "193177"
}
],
"symlink_target": ""
} |
import os
import shutil
import sys
import tempfile
import time
from importlib import import_module
from pathlib import Path
from unittest import mock
import pytest
import ray
from ray._private import gcs_utils
from ray._private.runtime_env.context import RuntimeEnvContext
from ray._private.runtime_env.packaging import (
get_uri_for_directory,
upload_package_if_needed,
)
from ray._private.runtime_env.working_dir import (
WorkingDirPlugin,
set_pythonpath_in_context,
)
from ray._private.utils import get_directory_size_bytes
# This test requires that you have AWS credentials set up (any AWS credentials
# will do; this test only accesses a public bucket).
# This package contains a subdirectory called `test_module`.
# Calling `test_module.one()` should return `2`.
# If you find that confusing, take it up with @jiaodong...
HTTPS_PACKAGE_URI = "https://github.com/shrekris-anyscale/test_module/archive/HEAD.zip"
S3_PACKAGE_URI = "s3://runtime-env-test/test_runtime_env.zip"
GS_PACKAGE_URI = "gs://public-runtime-env-test/test_module.zip"
TEST_IMPORT_DIR = "test_import_dir"
# Set scope to "module" to force this to run before start_cluster, whose scope
# is "function". We need these env vars to be set before Ray is started.
@pytest.fixture(scope="module")
def insert_test_dir_in_pythonpath():
with mock.patch.dict(
os.environ,
{"PYTHONPATH": TEST_IMPORT_DIR + os.pathsep + os.environ.get("PYTHONPATH", "")},
):
yield
@pytest.mark.asyncio
async def test_create_delete_size_equal(tmpdir, ray_start_regular):
"""Tests that `create` and `delete_uri` return the same size for a URI."""
gcs_aio_client = gcs_utils.GcsAioClient(
address=ray.worker.global_worker.gcs_client.address
)
# Create an arbitrary nonempty directory to upload.
path = Path(tmpdir)
dir_to_upload = path / "dir_to_upload"
dir_to_upload.mkdir(parents=True)
filepath = dir_to_upload / "file"
with filepath.open("w") as file:
file.write("F" * 100)
uri = get_uri_for_directory(dir_to_upload)
assert get_directory_size_bytes(dir_to_upload) > 0
uploaded = upload_package_if_needed(uri, tmpdir, dir_to_upload)
assert uploaded
manager = WorkingDirPlugin(tmpdir, gcs_aio_client)
created_size_bytes = await manager.create(uri, {}, RuntimeEnvContext())
deleted_size_bytes = manager.delete_uri(uri)
assert created_size_bytes == deleted_size_bytes
def test_inherit_cluster_env_pythonpath(monkeypatch):
monkeypatch.setenv(
"PYTHONPATH", "last" + os.pathsep + os.environ.get("PYTHONPATH", "")
)
context = RuntimeEnvContext(env_vars={"PYTHONPATH": "middle"})
set_pythonpath_in_context("first", context)
assert context.env_vars["PYTHONPATH"].startswith(
os.pathsep.join(["first", "middle", "last"])
)
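# Illustrative sketch (not Ray's implementation; the helper name is
# hypothetical): the ordering asserted above amounts to prepending the
# runtime_env path ahead of the context's existing PYTHONPATH, with the
# inherited cluster PYTHONPATH kept at the end, roughly:
def _pythonpath_order_sketch(new_path, context_pythonpath, inherited_pythonpath):
    return os.pathsep.join([new_path, context_pythonpath, inherited_pythonpath])
# e.g. _pythonpath_order_sketch("first", "middle", "last") gives
# "first:middle:last" on POSIX, the prefix checked by the assertion above.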
@pytest.mark.parametrize(
"option",
[
"failure",
"working_dir",
"working_dir_zip",
"py_modules",
"working_dir_and_py_modules",
],
)
def test_lazy_reads(
insert_test_dir_in_pythonpath, start_cluster, tmp_working_dir, option: str
):
"""Tests the case where we lazily read files or import inside a task/actor.
This tests both that this fails *without* the working_dir and that it
passes with it. Also tests that the existing PYTHONPATH is preserved,
so packages preinstalled on the cluster are still importable when using
py_modules or working_dir.
"""
cluster, address = start_cluster
def call_ray_init():
if option == "failure":
# Don't pass the files at all, so it should fail!
ray.init(address)
elif option == "working_dir":
ray.init(address, runtime_env={"working_dir": tmp_working_dir})
elif option == "working_dir_zip":
# Create a temp dir to place the zipped package
# from tmp_working_dir
with tempfile.TemporaryDirectory() as tmp_dir:
zip_dir = Path(tmp_working_dir)
package = shutil.make_archive(
os.path.join(tmp_dir, "test"), "zip", zip_dir
)
ray.init(address, runtime_env={"working_dir": package})
elif option == "py_modules":
ray.init(
address,
runtime_env={
"py_modules": [
str(Path(tmp_working_dir) / "test_module"),
Path(os.path.dirname(__file__))
/ "pip_install_test-0.5-py3-none-any.whl",
]
},
)
elif option == "working_dir_and_py_modules":
ray.init(
address,
runtime_env={
"working_dir": tmp_working_dir,
"py_modules": [
str(Path(tmp_working_dir) / "test_module"),
Path(os.path.dirname(__file__))
/ "pip_install_test-0.5-py3-none-any.whl",
],
},
)
else:
raise ValueError(f"unexpected pytest parameter {option}")
call_ray_init()
def reinit():
ray.shutdown()
# TODO(SongGuyang): Currently, reinitializing the driver generates the same
# job id, and if we reinit immediately after shutdown, the raylet may in some
# cases start processing the new job before the old job has finished. This
# inconsistency could disorder the URI references and delete a valid
# runtime env. We sleep here to work around this issue.
time.sleep(5)
call_ray_init()
@ray.remote
def test_import():
import test_module
assert TEST_IMPORT_DIR in os.environ.get("PYTHONPATH", "")
return test_module.one()
if option == "failure":
with pytest.raises(ImportError):
ray.get(test_import.remote())
else:
assert ray.get(test_import.remote()) == 1
if option in {"py_modules", "working_dir_and_py_modules"}:
@ray.remote
def test_py_modules_whl():
import pip_install_test # noqa: F401
return True
assert ray.get(test_py_modules_whl.remote())
if option in {"py_modules", "working_dir_zip"}:
# These options are not tested beyond this point, so return to save time.
return
reinit()
@ray.remote
def test_read():
return open("hello").read()
if option == "failure":
with pytest.raises(FileNotFoundError):
ray.get(test_read.remote())
elif option in {"working_dir_and_py_modules", "working_dir"}:
assert ray.get(test_read.remote()) == "world"
reinit()
@ray.remote
class Actor:
def test_import(self):
import test_module
assert TEST_IMPORT_DIR in os.environ.get("PYTHONPATH", "")
return test_module.one()
def test_read(self):
assert TEST_IMPORT_DIR in os.environ.get("PYTHONPATH", "")
return open("hello").read()
a = Actor.remote()
if option == "failure":
with pytest.raises(ImportError):
assert ray.get(a.test_import.remote()) == 1
with pytest.raises(FileNotFoundError):
assert ray.get(a.test_read.remote()) == "world"
elif option in {"working_dir_and_py_modules", "working_dir"}:
assert ray.get(a.test_import.remote()) == 1
assert ray.get(a.test_read.remote()) == "world"
@pytest.mark.parametrize("option", ["failure", "working_dir", "py_modules"])
def test_captured_import(start_cluster, tmp_working_dir, option: str):
"""Tests importing a module in the driver and capturing it in a task/actor.
This tests both that this fails *without* the working_dir and that it
passes with it.
"""
cluster, address = start_cluster
def call_ray_init():
if option == "failure":
# Don't pass the files at all, so it should fail!
ray.init(address)
elif option == "working_dir":
ray.init(address, runtime_env={"working_dir": tmp_working_dir})
elif option == "py_modules":
ray.init(
address,
runtime_env={
"py_modules": [os.path.join(tmp_working_dir, "test_module")]
},
)
call_ray_init()
def reinit():
ray.shutdown()
# TODO(SongGuyang): Currently, reinitializing the driver generates the same
# job id, and if we reinit immediately after shutdown, the raylet may in some
# cases start processing the new job before the old job has finished. This
# inconsistency could disorder the URI references and delete a valid
# runtime env. We sleep here to work around this issue.
time.sleep(5)
call_ray_init()
# Import in the driver.
sys.path.insert(0, tmp_working_dir)
import test_module
@ray.remote
def test_import():
return test_module.one()
if option == "failure":
with pytest.raises(Exception):
ray.get(test_import.remote())
else:
assert ray.get(test_import.remote()) == 1
reinit()
@ray.remote
class Actor:
def test_import(self):
return test_module.one()
if option == "failure":
with pytest.raises(Exception):
a = Actor.remote()
assert ray.get(a.test_import.remote()) == 1
else:
a = Actor.remote()
assert ray.get(a.test_import.remote()) == 1
def test_empty_working_dir(start_cluster):
"""Tests the case where we pass an empty directory as the working_dir."""
cluster, address = start_cluster
with tempfile.TemporaryDirectory() as working_dir:
ray.init(address, runtime_env={"working_dir": working_dir})
@ray.remote
def listdir():
return os.listdir()
assert len(ray.get(listdir.remote())) == 0
@ray.remote
class A:
def listdir(self):
return os.listdir()
a = A.remote()
assert len(ray.get(a.listdir.remote())) == 0
# Test that we can reconnect with no errors
ray.shutdown()
ray.init(address, runtime_env={"working_dir": working_dir})
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_input_validation(start_cluster, option: str):
"""Tests input validation for working_dir and py_modules."""
cluster, address = start_cluster
with pytest.raises(TypeError):
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": 10})
else:
ray.init(address, runtime_env={"py_modules": [10]})
ray.shutdown()
with pytest.raises(ValueError):
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": "/does/not/exist"})
else:
ray.init(address, runtime_env={"py_modules": ["/does/not/exist"]})
ray.shutdown()
with pytest.raises(ValueError):
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": "does_not_exist"})
else:
ray.init(address, runtime_env={"py_modules": ["does_not_exist"]})
ray.shutdown()
for uri in ["https://no_dot_zip", "s3://no_dot_zip", "gs://no_dot_zip"]:
with pytest.raises(ValueError):
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": uri})
else:
ray.init(address, runtime_env={"py_modules": [uri]})
ray.shutdown()
if option == "py_modules":
with pytest.raises(TypeError):
# Must be in a list.
ray.init(address, runtime_env={"py_modules": "."})
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_exclusion(start_cluster, tmp_working_dir, option):
"""Tests various forms of the 'excludes' parameter."""
cluster, address = start_cluster
def create_file(p, empty=False):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
if not empty:
f.write("Test")
working_path = Path(tmp_working_dir)
create_file(working_path / "__init__.py", empty=True)
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
module_name = Path(tmp_working_dir).name
# Test that all files are present without excluding.
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": tmp_working_dir})
else:
ray.init(address, runtime_env={"py_modules": [tmp_working_dir]})
@ray.remote
def check_file(name):
if option == "py_modules":
try:
module = import_module(module_name)
except ImportError:
return "FAILED"
name = os.path.join(module.__path__[0], name)
try:
with open(name) as f:
return f.read()
except Exception:
return "FAILED"
def get_all():
return ray.get(
[
check_file.remote("test1"),
check_file.remote("test2"),
check_file.remote("test3"),
check_file.remote(os.path.join("tmp_dir", "test_1")),
check_file.remote(os.path.join("tmp_dir", "test_2")),
check_file.remote(os.path.join("tmp_dir", "test_3")),
check_file.remote(os.path.join("tmp_dir", "sub_dir", "test_1")),
check_file.remote(os.path.join("tmp_dir", "sub_dir", "test_2")),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
]
)
assert get_all() == [
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
"Test",
]
ray.shutdown()
# Test various exclusion methods.
excludes = [
# exclude by relative path
"test2",
# exclude by dir
str((Path("tmp_dir") / "sub_dir").as_posix()),
# exclude part of the dir
str((Path("tmp_dir") / "test_1").as_posix()),
# exclude part of the dir
str((Path("tmp_dir") / "test_2").as_posix()),
]
if option == "working_dir":
ray.init(
address, runtime_env={"working_dir": tmp_working_dir, "excludes": excludes}
)
else:
ray.init(
address, runtime_env={"py_modules": [tmp_working_dir], "excludes": excludes}
)
assert get_all() == [
"Test",
"FAILED",
"Test",
"FAILED",
"FAILED",
"Test",
"FAILED",
"FAILED",
"Test",
"Test",
"Test",
]
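# The expected results above follow directly from the excludes list: "test2",
# everything under tmp_dir/sub_dir, and tmp_dir/test_1 / tmp_dir/test_2 are
# excluded from the upload (hence "FAILED"), while every other file is still
# present and reads back "Test".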
ray.shutdown()
# Test excluding all files using gitignore pattern matching syntax
excludes = ["*"]
if option == "working_dir":
ray.init(
address, runtime_env={"working_dir": tmp_working_dir, "excludes": excludes}
)
else:
module_name = Path(tmp_working_dir).name
ray.init(
address, runtime_env={"py_modules": [tmp_working_dir], "excludes": excludes}
)
assert get_all() == [
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
]
ray.shutdown()
# Test excluding with a .gitignore file.
with open(f"{tmp_working_dir}/.gitignore", "w") as f:
f.write(
"""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
"""
)
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": tmp_working_dir})
else:
module_name = Path(tmp_working_dir).name
ray.init(address, runtime_env={"py_modules": [tmp_working_dir]})
assert get_all() == [
"FAILED",
"Test",
"Test",
"FAILED",
"FAILED",
"Test",
"Test",
"FAILED",
"FAILED",
"FAILED",
"FAILED",
]
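# The expected results above follow standard gitignore semantics: "test_[12]"
# excludes any file named test_1 or test_2 at any depth, "/test1" excludes only
# the top-level test1, "!/tmp_dir/sub_dir/test_1" re-includes that single file,
# and "cache/" excludes every directory named cache. All remaining files still
# upload and read back "Test".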
def test_override_failure(shutdown_only):
"""Tests invalid override behaviors."""
ray.init()
with pytest.raises(ValueError):
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
@ray.remote
def g():
pass
with pytest.raises(ValueError):
g.options(runtime_env={"working_dir": "."})
with pytest.raises(ValueError):
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
@ray.remote
class B:
pass
with pytest.raises(ValueError):
B.options(runtime_env={"working_dir": "."})
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| {
"content_hash": "3448cf8ee4f139197b5cf7d889fed45e",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 88,
"avg_line_length": 30.694444444444443,
"alnum_prop": 0.5692873303167421,
"repo_name": "ray-project/ray",
"id": "f145eea151f8911537d00432bd16fae9f4aaa6ce",
"size": "17680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_runtime_env_working_dir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import logging
import sys
from .. import task
from ..hooks import jug_hook, register_hook, register_hook_once
from ..io import print_task_summary_table
from ..jug import init
from . import SubCommand, maybe_print_citation_info
__all__ = [
'execute'
]
def _sigterm(_, __):
sys.exit(1)
def _log_loadable(t):
logging.info('Loadable {0}...'.format(t.name))
class TaskStats(object):
def __init__(self):
self.loaded = defaultdict(int)
self.executed = defaultdict(int)
register_hook('execute.task-loadable', self.loadable)
register_hook('execute.task-executed1', self.executed1)
def loadable(self, t):
self.loaded[t.name] += 1
def executed1(self, t):
self.executed[t.name] += 1
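# Illustrative only (not part of the original file): additional observers can
# subscribe to the same events with the hook API used above, e.g.
#
#   register_hook('execute.task-executed1',
#                 lambda t: logging.info('finished %s', t.name))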
class ExecuteCommand(SubCommand):
'''Execute tasks
execute(options)
Implement 'execute' command
'''
name = "execute"
def run(self, options, *args, **kwargs):
from signal import signal, SIGTERM
from ..jug import execution_loop
signal(SIGTERM, _sigterm)
tasks = task.alltasks
tstats = TaskStats()
store = None
register_hook_once('execute.task-loadable', '_log_loadable', _log_loadable)
nr_wait_cycles = int(options.execute_nr_wait_cycles)
noprogress = 0
failures = False
while noprogress < nr_wait_cycles:
del tasks[:]
store, jugspace = init(options.jugfile, options.jugdir, store=store)
if options.debug:
for t in tasks:
# Trigger hash computation:
t.hash()
previous = sum(tstats.executed.values())
failures = execution_loop(tasks, options) or failures
after = sum(tstats.executed.values())
done = not jugspace.get('__jug__hasbarrier__', False)
if done:
break
if after == previous:
from time import sleep
noprogress += 1
sleep(int(options.execute_wait_cycle_time))
else:
noprogress = 0
else:
logging.info('No tasks can be run!')
jug_hook('execute.finished_pre_status')
maybe_print_citation_info(options)
print_task_summary_table(options, [("Executed", tstats.executed), ("Loaded", tstats.loaded)])
jug_hook('execute.finished_post_status')
if failures:
sys.exit(1)
def parse(self, parser):
defaults = self.parse_defaults()
parser.add_argument('--wait-cycle-time', action='store', dest='execute_wait_cycle_time',
metavar='WAIT_CYCLE_TIME', type=int,
help=("How long to wait in each cycle (in seconds) "
"(Default: {execute_wait_cycle_time})".format(**defaults)))
parser.add_argument('--nr-wait-cycles', action='store',
dest='execute_nr_wait_cycles',
metavar='NR_WAIT_CYCLES', type=int,
help=("How many wait cycles to do "
"(Default: {execute_nr_wait_cycles})".format(**defaults)))
parser.add_argument('--target', action='store', dest='execute_target',
metavar='TARGET',
help="Restrict tasks to execute based on their name")
parser.add_argument('--keep-going',
action='store_const', const=True,
dest='execute_keep_going',
help='Continue after errors')
def parse_defaults(self):
wait_cycle_time = 12
default_values = {
"execute_keep_going": False,
"execute_target": None,
"execute_wait_cycle_time": wait_cycle_time,
"execute_nr_wait_cycles": (30 * 60) // wait_cycle_time,
}
return default_values
execute = ExecuteCommand()
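# Note on the defaults in parse_defaults(): with a 12-second wait cycle and
# (30 * 60) // 12 == 150 wait cycles, `jug execute` keeps polling for roughly
# 30 minutes without progress before concluding that no tasks can be run.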
| {
"content_hash": "5899fdfa4e29bf9364c0dc39f6e0c6a3",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 101,
"avg_line_length": 32.328,
"alnum_prop": 0.5515961395694136,
"repo_name": "unode/jug",
"id": "f560ff7275938012c541d9a32515713171a496ca",
"size": "5269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jug/subcommands/execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nix",
"bytes": "795"
},
{
"name": "Python",
"bytes": "232822"
},
{
"name": "Shell",
"bytes": "520"
},
{
"name": "TeX",
"bytes": "23794"
}
],
"symlink_target": ""
} |
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'batch_norm',
'bias_add',
'conv2d',
'conv2d_in_plane',
'conv2d_transpose',
'convolution',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'dropout',
'flatten',
'fully_connected',
'layer_norm',
'linear',
'pool',
'max_pool2d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu']
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
It is assumed that the pooling is done per image, not across the batch or
channel dimensions.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if data_format == DATA_FORMAT_NHWC:
ksize = [1, kernel_h, kernel_w, 1]
strides = [1, stride_h, stride_w, 1]
else:
ksize = [1, 1, kernel_h, kernel_w]
strides = [1, 1, stride_h, stride_w]
outputs = nn.avg_pool(inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
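# Usage sketch (illustrative; `images` is an assumed placeholder and the module
# is addressed via the usual `tf.contrib.layers` alias): a 32x32 NHWC batch
# average-pooled with a 3x3 kernel and stride 2 under the default 'VALID'
# padding yields (32 - 3) // 2 + 1 = 15 per spatial dimension.
#
#   images = tf.placeholder(tf.float32, [None, 32, 32, 3])
#   pooled = tf.contrib.layers.avg_pool2d(images, kernel_size=3, stride=2)
#   # pooled.shape == [None, 15, 15, 3]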
def _fused_batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when is_training is True, the moving_mean and moving_variance need to be
updated. By default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS`, so
they need to be added as a dependency to the `train_op`. For example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
total_loss = control_flow_ops.with_dependencies([updates], total_loss)
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: a tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: decay for the moving average.
center: If True, subtract `beta`. If False, `beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: optional initializers for beta, gamma, moving mean and
moving variance.
updates_collections: collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
ValueError: if the rank of `inputs` is undefined.
ValueError: if the rank of `inputs` is neither 2 or 4.
ValueError: if rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
original_shape = inputs.get_shape()
original_rank = original_shape.ndims
if original_rank is None:
raise ValueError('Inputs %s has undefined rank' % inputs.name)
elif original_rank not in [2, 4]:
raise ValueError('Inputs %s has unsupported rank. \
Expected 2 or 4 but got %d' % (inputs.name, original_rank))
if original_rank == 2:
channels = inputs.get_shape()[-1].value
if channels is None:
raise ValueError('`C` dimension must be known but is None')
new_shape = [-1, channels, 1, 1] if data_format == DATA_FORMAT_NCHW else \
[-1, 1, 1, channels]
inputs = array_ops.reshape(inputs, new_shape)
inputs_shape = inputs.get_shape()
dtype = inputs.dtype.base_dtype
if data_format == DATA_FORMAT_NHWC:
params_shape = inputs_shape[-1:]
else:
params_shape = inputs_shape[1:2]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined `C` dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
trainable_beta = trainable and center
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer)
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable_beta)
trainable_gamma = trainable and scale
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer)
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable_gamma)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get('moving_mean',
init_ops.zeros_initializer)
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer)
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=moving_mean,
variance=moving_variance,
epsilon=epsilon,
is_training=False,
data_format=data_format)
outputs, mean, variance = utils.smart_cond(is_training,
_fused_batch_norm_training,
_fused_batch_norm_inference)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_updates` will be true.
is_training_value = utils.constant_value(is_training)
need_updates = is_training_value is None or is_training_value
if need_updates:
if updates_collections is None:
_no_updates = lambda: outputs
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=False)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(outputs)
outputs = utils.smart_cond(is_training, _force_updates, _no_updates)
else:
moving_vars_fn = lambda: (moving_mean, moving_variance)
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=False)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
outputs.set_shape(inputs_shape)
if original_shape.ndims == 2:
outputs = array_ops.reshape(outputs, original_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=False,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when is_training is True, the moving_mean and moving_variance need to be
updated. By default the update_ops are placed in `tf.GraphKeys.UPDATE_OPS`, so
they need to be added as a dependency to the `train_op`. For example:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
total_loss = control_flow_ops.with_dependencies([updates], total_loss)
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: a tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: decay for the moving average.
center: If True, subtract `beta`. If False, `beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: optional initializers for beta, gamma, moving mean and
moving variance.
updates_collections: collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: if `batch_weights` is not None and `fused` is True.
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
ValueError: if `data_format` is `NCHW` while `fused` is False.
ValueError: if the rank of `inputs` is undefined.
ValueError: if rank or last dimension of `inputs` is undefined.
"""
if fused:
if batch_weights is not None:
raise ValueError('Weighted mean and variance is not currently '
'supported for fused batch norm.')
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
if data_format == DATA_FORMAT_NCHW:
raise ValueError('data_format must be NHWC if fused is False.')
with variable_scope.variable_scope(scope, 'BatchNorm', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
axis = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer)
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer)
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables.
partitioner = variable_scope.get_variable_scope().partitioner
try:
variable_scope.get_variable_scope().set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer)
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer)
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
finally:
variable_scope.get_variable_scope().set_partitioner(partitioner)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_moments` will be true.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
# Use a copy of moving_mean as a shift to compute more reliable moments.
shift = math_ops.add(moving_mean, 0)
mean, variance = nn.moments(inputs, axis, shift=shift)
else:
mean, variance = nn.weighted_moments(inputs, axis, batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=False)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=False)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
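# Usage sketch (illustrative; `images`, `is_training`, `optimizer` and
# `total_loss` are assumed names). As the docstring notes, batch_norm is most
# often passed as the normalizer_fn of conv2d/fully_connected, with the update
# ops added as a dependency of the train op:
#
#   net = tf.contrib.layers.conv2d(
#       images, 64, [3, 3],
#       normalizer_fn=tf.contrib.layers.batch_norm,
#       normalizer_params={'is_training': is_training})
#   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#   with tf.control_dependencies(update_ops):
#     train_op = optimizer.minimize(total_loss)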
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer,
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
inputs: a tensor with at least rank 2 and a known value for the last dimension,
e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
data_format: A string. 'NHWC' and 'NCHW' are supported.
scope: Optional scope for variable_scope.
Returns:
a tensor representing the result of adding biases to the inputs.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
ValueError: if `data_format` is `NCHW` and rank of `inputs` is not 4.
ValueError: if the rank of `inputs` is undefined.
ValueError: if rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Dims of shape must be known but is None')
elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:
raise ValueError('Data format NCHW only supports 4D Tensor')
axis = 1 if data_format==DATA_FORMAT_NCHW else -1
num_features = inputs_shape[axis].value
if num_features is None:
raise ValueError('`C` dimension must be known but is None')
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: a Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: integer, the number of output filters.
kernel_size: a sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: a sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: one of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
rate: a sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None, for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
a tensor representing the output of the operation.
Raises:
ValueError: if `data_format` is invalid.
ValueError: if both `rate` and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC']:
raise ValueError('Invalid data_format: %r' % (data_format,))
with variable_scope.variable_scope(scope, 'Conv', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3 or input_rank > 5:
raise ValueError('Rank of inputs is %d, which is not >= 3 and <= 5' %
input_rank)
conv_dims = input_rank - 2
kernel_size = utils.n_positive_integers(conv_dims, kernel_size)
stride = utils.n_positive_integers(conv_dims, stride)
rate = utils.n_positive_integers(conv_dims, rate)
if data_format is None or data_format.endswith('C'):
num_input_channels = inputs.get_shape()[input_rank - 1].value
elif data_format.startswith('NC'):
num_input_channels = inputs.get_shape()[1].value
else:
raise ValueError('Invalid data_format')
if num_input_channels is None:
raise ValueError('Number of in_channels must be known.')
weights_shape = (
list(kernel_size) + [num_input_channels, num_outputs])
weights_collections = utils.get_variable_collections(variables_collections,
'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
outputs = nn.convolution(input=inputs,
filter=weights,
dilation_rate=rate,
strides=stride,
padding=padding,
data_format=data_format)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
convolution2d = convolution
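# Usage sketch (illustrative; `inputs` is an assumed 4-D NHWC tensor). Per the
# docstring, `stride` and `rate` are mutually exclusive ways to enlarge the
# receptive field: strided convolution downsamples, while atrous (dilated)
# convolution keeps the spatial resolution.
#
#   net = convolution2d(inputs, 64, [3, 3], stride=2)  # spatially downsampled
#   net = convolution2d(inputs, 64, [3, 3], rate=2)    # dilated, same resolution
#   # Specifying stride != 1 together with rate != 1 raises a ValueError.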
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
image = tf.constant(..., shape=(16, 240, 320, 3))
vert_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[2, 1])
horz_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[1, 2])
Args:
inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].
kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of
the pooling. Can be an int if both values are the same.
stride: a list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding type to use, either 'SAME' or 'VALID'.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None, for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_filters_in,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: integer, the number of output filters.
kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of
the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None, for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
a tensor representing the output of the operation.
Raises:
ValueError: if 'kernel_size' is not a list of length 2.
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
ValueError: if `C` dimension of `inputs` is None.
"""
with variable_scope.variable_scope(
scope, 'Conv2d_transpose', [inputs], reuse=reuse) as sc:
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if data_format == DATA_FORMAT_NCHW:
c_axis, h_axis, w_axis = 1, 2, 3
else:
h_axis, w_axis, c_axis = 1, 2, 3
num_filters_in = inputs.get_shape()[c_axis].value
if num_filters_in is None:
raise ValueError('`C` dimension of `inputs` must be known but is None.')
weights_shape = [kernel_h, kernel_w, num_outputs, num_filters_in]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable(
'weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
height, width = inputs_shape[h_axis], inputs_shape[w_axis]
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
if isinstance(dim_size, ops.Tensor):
dim_size = math_ops.mul(dim_size, stride_size)
elif dim_size is not None:
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
# Infer the dynamic output shape:
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
if data_format == DATA_FORMAT_NHWC:
output_shape = [batch_size, out_height, out_width, num_outputs]
strides = [1, stride_h, stride_w, 1]
else:
output_shape = [batch_size, num_outputs, out_height, out_width]
strides = [1, 1, stride_h, stride_w]
output_shape = array_ops.pack(output_shape)
outputs = nn.conv2d_transpose(inputs, weights, output_shape,
strides,
padding=padding,
data_format=data_format)
# Infer the static output shape:
out_shape = inputs.get_shape().as_list()
out_shape[c_axis] = num_outputs
out_shape[h_axis] = get_deconv_dim(out_shape[h_axis], stride_h, kernel_h, padding)
out_shape[w_axis] = get_deconv_dim(out_shape[w_axis], stride_w, kernel_w, padding)
outputs.set_shape(out_shape)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
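# Illustrative usage sketch (not part of the original API): with 'SAME' padding and
# stride 2, the transpose convolution roughly doubles the spatial dimensions,
# e.g. [1, 8, 8, 3] -> [1, 16, 16, 16]. Shapes and sizes are arbitrary.
def _example_convolution2d_transpose():
  images = array_ops.ones([1, 8, 8, 3])
  return convolution2d_transpose(images, num_outputs=16, kernel_size=[3, 3], stride=2)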
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: the tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
a tensor representing the output of the operation.
"""
with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
id_fn = lambda: array_ops.identity(inputs)
outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
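# Illustrative usage sketch (not part of the original API): dropout is only active
# when is_training evaluates to True; otherwise the input is returned unchanged.
# Shapes and keep_prob are arbitrary.
def _example_dropout():
  activations = array_ops.ones([4, 10])
  train_out = dropout(activations, keep_prob=0.8, is_training=True)
  eval_out = dropout(activations, keep_prob=0.8, is_training=False)  # identity
  return train_out, eval_out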
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
outputs_collections: collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
dims = inputs_shape[1:]
if not dims.is_fully_defined():
      raise ValueError('Inputs dimensions after the first must be fully defined.')
k = dims.num_elements()
outputs = array_ops.reshape(inputs, [-1, k])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
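# Illustrative usage sketch (not part of the original API): flatten keeps the batch
# dimension and collapses everything else, e.g. [4, 7, 7, 64] -> [4, 3136].
def _example_flatten():
  features = array_ops.ones([4, 7, 7, 64])
  return flatten(features)  # static shape: [4, 7 * 7 * 64]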
def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
outer_dimensions = inputs.shape[:new_rank - 1]
inner_dimensions = inputs.shape[new_rank - 1:]
new_shape = array_ops.concat(0, (outer_dimensions,
[math_ops.reduce_prod(inner_dimensions)]))
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
def _dense_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
rank_assertion = check_ops.assert_rank_at_least(
inputs, new_rank, message='inputs has rank less than new_rank')
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.slice(
array_ops.shape(inputs), [0], [new_rank - 1])
new_shape = array_ops.concat(0, (outer_dimensions, [-1]))
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
if isinstance(new_rank, six.integer_types):
static_shape = inputs.get_shape()
if static_shape is not None and static_shape.dims is not None:
static_shape = static_shape.as_list()
static_outer_dims = static_shape[:new_rank - 1]
static_inner_dims = static_shape[new_rank - 1:]
flattened_dimension = 1
for inner_dim in static_inner_dims:
if inner_dim is None:
flattened_dimension = None
break
flattened_dimension *= inner_dim
reshaped.set_shape(static_outer_dims + [flattened_dimension])
return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
"""Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.
For example:
'''
x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
y = _inner_flatten(x, 4)
assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
'''
This layer will fail at run time if `new_rank` is greater than the current
rank of `inputs`.
Args:
inputs: a `Tensor` or `SparseTensor`.
new_rank: the desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: collection to which the outputs will be added.
scope: optional scope for `name_scope`.
Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
flattened = _dense_inner_flatten(inputs, new_rank)
return utils.collect_named_outputs(output_collections, sc, flattened)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
  it is applied to the hidden units as well.
  Note that if `inputs` has a rank greater than 2, then `inputs` is flattened
  prior to the initial matrix multiply by `weights`.
Args:
    inputs: A tensor with at least rank 2 and a known value for the last
      dimension, e.g. `[batch_size, depth]` or `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
the tensor variable representing the result of the series of operations.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
  if not isinstance(num_outputs, six.integer_types):
    raise ValueError('num_outputs should be int or long, got %s.' % (num_outputs,))
with variable_scope.variable_scope(scope, 'fully_connected', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
num_input_units = utils.last_dimension(inputs_shape, min_rank=2)
static_shape = inputs_shape.as_list()
static_shape[-1] = num_outputs
out_shape = array_ops.unpack(array_ops.shape(inputs))
out_shape[-1] = num_outputs
weights_shape = [num_input_units, num_outputs]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
if len(static_shape) > 2:
# Reshape inputs
inputs = array_ops.reshape(inputs, [-1, num_input_units])
outputs = standard_ops.matmul(inputs, weights)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
if len(static_shape) > 2:
# Reshape back outputs
outputs = array_ops.reshape(outputs, array_ops.pack(out_shape))
outputs.set_shape(static_shape)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
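# Illustrative usage sketch (not part of the original API): a small two-layer head.
# Layer sizes and scope names are arbitrary; the final layer disables the default
# ReLU activation to produce raw logits.
def _example_fully_connected():
  x = array_ops.ones([32, 100])
  hidden = fully_connected(x, 64, scope='example_fc1')
  logits = fully_connected(hidden, 10, activation_fn=None, scope='example_fc2')
  return logits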
@add_arg_scope
def layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a Layer Normalization layer from https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
Can be used as a normalizer function for conv2d and fully_connected.
Args:
inputs: a tensor with 2 or more dimensions. The normalization
occurs over all but the first dimension.
center: If True, subtract `beta`. If False, `beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: if rank or last dimension of `inputs` is undefined.
"""
with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = list(range(1, inputs_rank))
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer,
collections=gamma_collections,
trainable=trainable)
# Calculate the moments on the last axis (layer activations).
mean, variance = nn.moments(inputs, axis, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1E-12
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
outputs)
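# Illustrative usage sketch (not part of the original API): layer_norm can be used
# directly or plugged into other layers through normalizer_fn. Sizes and scope
# names are arbitrary.
def _example_layer_norm():
  x = array_ops.ones([16, 128])
  normalized = layer_norm(x)
  with_fc = fully_connected(x, 64, normalizer_fn=layer_norm, scope='example_ln_fc')
  return normalized, with_fc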
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `kernel_size` is not a list of length 2.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if data_format == DATA_FORMAT_NHWC:
ksize = [1, kernel_h, kernel_w, 1]
strides = [1, stride_h, stride_w, 1]
else:
ksize = [1, 1, kernel_h, kernel_w]
strides = [1, 1, stride_h, stride_w]
outputs = nn.max_pool(inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
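# Illustrative usage sketch (not part of the original API): a 2x2 max pool with the
# default stride of 2 halves the spatial dimensions, e.g. [1, 8, 8, 3] -> [1, 4, 4, 3].
def _example_max_pool2d():
  images = array_ops.ones([1, 8, 8, 3])
  return max_pool2d(images, [2, 2])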
@add_arg_scope
def pool(inputs,
kernel_size,
pooling_type,
padding='VALID',
data_format=None,
dilation_rate=1,
stride=1,
outputs_collections=None,
scope=None):
# pylint: disable=line-too-long
"""Adds a pooling op.
Args:
inputs: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
kernel_size: Sequence of N ints >= 1. Can also be a single integer to
specify the same value for all spatial dimensions.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, currently the only valid value is "NDHWC".
dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
to [1]*N. Can also be a single integer to specify the same value for all
spatial dimensions. If any value of dilation_rate is > 1, then all values
of stride must be 1.
stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
a single integer to specify the same value for all spatial dimensions. If
any value of stride is > 1, then all values of dilation_rate must be 1.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(scope, '%s_pool' %
(pooling_type.lower()), [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3:
raise ValueError('Rank of inputs must be >= 3')
num_spatial_dims = input_rank - 2
output = nn.pool(
input=inputs,
window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
pooling_type=pooling_type,
padding=padding,
data_format=data_format,
dilation_rate=utils.n_positive_integers(num_spatial_dims,
dilation_rate),
strides=utils.n_positive_integers(num_spatial_dims, stride),
name=sc)
return utils.collect_named_outputs(outputs_collections, sc, output)
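# Illustrative usage sketch (not part of the original API): the generic pool op
# covers AVG/MAX pooling on N-dimensional inputs; this call reproduces the
# max_pool2d example above. Shapes are arbitrary.
def _example_pool():
  images = array_ops.ones([1, 8, 8, 3])
  return pool(images, [2, 2], 'MAX', stride=2)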
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using `tf.one_hot`.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
one hot encoding of the labels.
"""
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
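# Illustrative usage sketch (not part of the original API): integer class ids become
# one-hot rows, e.g. label 2 with num_classes=5 -> [0., 0., 1., 0., 0.].
def _example_one_hot_encoding():
  labels = ops.convert_to_tensor([0, 2, 4])
  return one_hot_encoding(labels, num_classes=5)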
def _apply_activation(y, activation_fn, output_collections):
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
a tensor result of applying the layer, repetitions times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i+1)
outputs = layer(outputs, *args, **kwargs)
return outputs
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `batch_norm_params` is None,
it adds bias to the result, creating a variable called 'biases', otherwise
it adds a batch normalization layer. It finally applies an activation function
to produce the end result.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
    num_outputs: the number of pointwise convolution output filters. If it is
      None, then we skip the pointwise convolution stage.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
depth_multiplier: the number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: a list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: one of 'VALID' or 'SAME'.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is used.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'SeparableConv2d', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w,
num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [1, stride_h, stride_w, 1]
if num_outputs is not None:
# Full separable convolution: Depthwise followed by pointwise convolution.
pointwise_shape = [1, 1, depth_multiplier * num_filters_in,
num_outputs]
pointwise_weights = variables.model_variable(
'pointwise_weights',
shape=pointwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
outputs = nn.separable_conv2d(inputs,
depthwise_weights,
pointwise_weights,
strides,
padding)
else:
# Depthwise convolution only.
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
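# Illustrative usage sketch (not part of the original API): the full separable
# convolution (depthwise + pointwise) versus the depthwise-only variant obtained
# by passing num_outputs=None. Shapes and sizes are arbitrary.
def _example_separable_convolution2d():
  images = array_ops.ones([1, 32, 32, 3])
  full = separable_convolution2d(images, num_outputs=16, kernel_size=[3, 3],
                                 depth_multiplier=2)
  depthwise_only = separable_convolution2d(images, num_outputs=None,
                                           kernel_size=[3, 3], depth_multiplier=2)
  return full, depthwise_only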
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
a `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
predictions.set_shape(logits.get_shape())
return predictions
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
a `Tensor` result of applying the stacked layers.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_scope(scope, 'Stack', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If `dim` is negative or not smaller than the rank of `inputs`.
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
if dim < 0 or dim >= input_rank:
raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(0, multiples)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
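# Illustrative usage sketch (not part of the original API): each row of a [3, 4]
# tensor is scaled to (approximately) unit L2 norm along dim=1. Shapes are arbitrary.
def _example_unit_norm():
  x = array_ops.ones([3, 4])
  return unit_norm(x, dim=1)  # every row now has norm ~1 (up to epsilon)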
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer,
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_scope(name, 'fully_connected', [x]):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unpack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.pack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv2d_transpose = convolution2d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| {
"content_hash": "ff7279391bedd97ae42556e10cc937fe",
"timestamp": "",
"source": "github",
"line_count": 2008,
"max_line_length": 86,
"avg_line_length": 44.044322709163346,
"alnum_prop": 0.636345134044165,
"repo_name": "tobegit3hub/deep_cnn",
"id": "e4bcff3d2fb11cd4e81604bc7b3146450b4ba8e1",
"size": "89179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java_predict_client/src/main/proto/tensorflow/contrib/layers/python/layers/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2967"
},
{
"name": "C",
"bytes": "88971"
},
{
"name": "C++",
"bytes": "12677340"
},
{
"name": "CMake",
"bytes": "93933"
},
{
"name": "Go",
"bytes": "85550"
},
{
"name": "HTML",
"bytes": "525038"
},
{
"name": "Java",
"bytes": "68271"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "138762"
},
{
"name": "Python",
"bytes": "13156664"
},
{
"name": "Shell",
"bytes": "256250"
},
{
"name": "TypeScript",
"bytes": "442815"
}
],
"symlink_target": ""
} |
import unittest
import monitoring as mon
class RecordTestCase(unittest.TestCase):
def testRecordValueList(self):
r = mon.Record(values=[1, 2, 3])
self.assertEqual(r.values, [1, 2, 3])
def testRecordSingleValue(self):
r = mon.Record(values=1)
self.assertEqual(r.values, [1])
def testRecordLoggerOrigin(self):
r = mon.Record(values=[1, 2, 3])
self.assertEqual(r.origin, 'logger')
def testRecordSampleOrigin(self):
r = mon.Record(values=1)
self.assertEqual(r.origin, 'sample')
def testRecordTimesList(self):
r = mon.Record(values=[1, 2, 3], startTime=0, interval=10)
self.assertEqual(r.times, [0, 10, 20])
def testRecordEndTime(self):
r = mon.Record(values=[1, 2, 3], startTime=0, interval=10)
self.assertEqual(r.endTime, 20)
def testRecordLength(self):
r = mon.Record(values=[1, 2, 3])
self.assertEqual(len(r), 3)
def testRecordSingleTime(self):
r = mon.Record(values=1, startTime=0)
self.assertEqual(r.times, [0])
def testRecordIrregularInterval(self):
r = mon.Record(values=1)
self.assertEqual(r.interval, -1)
def testRecordIrregularIntervalForce(self):
"""Single-value records always treated as irregular"""
r = mon.Record(values=1, interval=999)
self.assertEqual(r.interval, -1)
def testIntervalStr15Min(self):
r = mon.Record(interval=15)
self.assertEqual(r.intervalStr, '15MIN')
def testIntervalStr59Min(self):
r = mon.Record(interval=59)
self.assertEqual(r.intervalStr, '59MIN')
def testIntervalStr1Hour(self):
r = mon.Record(interval=60)
self.assertEqual(r.intervalStr, '1HOUR')
def testIntervalStr23Hour(self):
r = mon.Record(interval=23*60)
self.assertEqual(r.intervalStr, '23HOUR')
def testIntervalStr1Day(self):
r = mon.Record(interval=24*60)
self.assertEqual(r.intervalStr, '1DAY')
def testIntervalStr2Day(self):
r = mon.Record(interval=24*60*2)
self.assertEqual(r.intervalStr, '2DAY')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5f8dfe83599346927e6b41fb03bfeefe",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 66,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.63,
"repo_name": "jprine/monitoring-module",
"id": "3ffb3721b03fede1677e7eae24eaadb3cbcc3ff9",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_monitoring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "NSIS",
"bytes": "1585"
},
{
"name": "PowerShell",
"bytes": "615"
},
{
"name": "Python",
"bytes": "316882"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__doc__="""
ATLAS Higgs Machine Learning Challenge 2014
Read CERN Open Data Portal Dataset http://opendata.cern.ch/record/328
and manipulate it
- Label is changed from character to integer 0 or 1
- DetailLabel is introduced indicating subpopulations
- systematic effects are simulated
    - bkg_weight_norm : manipulates the background weight
    - tau_energy_scale : manipulates PRI_tau_pt and recomputes other quantities accordingly
Some WARNING : variable DER_mass_MMC is not properly manipulated (modification is linearised),
and I advocate to NOT use DER_mass_MMC when doSystTauEnergyScale is enabled.
There is a threshold in the original HiggsML file at 20 GeV on PRI_tau_energy.
This threshold is moved when changing sysTauEnergyScale, which is unphysical.
So if you're going to play with sysTauEnergyScale (within 0.9-1.1),
I suggest you remove events below, say, 22 GeV *after* the manipulation.
Applying doSystTauEnergyScale with sysTauEnergyScale=1. does NOT yield identical results as not applying
doSystTauEnergyScale; this is because of rounding error and the zero mass approximation.
doSystTauEnergyScale impacts PRI_tau_pt as well as PRI_met and PRI_met_phi
- so overall I suggest that when playing with doSystTauEnergyScale, the reference is:
    - not using DER_mass_MMC
    - applying *after* this manipulation PRI_tau_pt > 22
    - running with sysTauEnergyScale=1. to have the reference
Author D. Rousseau LAL, Nov 2016
Modification Dec 2016 (V. Estrade):
- Wrap everything into separated functions.
- V4 class now handle 1D-vector values (to improve computation efficiency).
- Fix compatibility with both python 2 and 3.
- Use pandas.DataFrame to ease computation along columns
- Loading function for the base HiggsML dataset (fetch it on the internet if needed)
Refactor March 2017 (V. Estrade):
- Split load function (cleaner)
"""
__version__ = "0.1"
__author__ = "David Rousseau, and Victor Estrade "
import sys
import os
import gzip
import copy
import pandas as pd
import numpy as np
from .download import maybe_download
from .download import get_data_dir
COLUMN_NAMES = {
0: 'PRI_lep_1_pt',
1: 'PRI_lep_1_eta',
2: 'PRI_lep_1_phi',
3: 'PRI_lep_2_pt',
4: 'PRI_lep_2_eta',
5: 'PRI_lep_2_phi',
6: 'PRI_met',
7: 'PRI_met_phi',
8: '8',
9: '9',
10: 'relative_MET',
11: 'axial_MET',
12: 'MMC',
13: 'delta_R',
14: 'delta_eta',
15: '15',
16: '16',
17: 'Pt/Pt',
18: 'invariant_mass_visible',
19: 'invariant_mass_ll',
20: 'delta_phi',
21: 'sphericity',
22: 'transverse_sphericity',
23: '23',
24: '24',
}
RESTRICTED_COLUMNS = [0,1,2,3,4,5,6,7]
COLUMN_RENAME_FOR_SKEWING = {
'PRI_lep_1_pt': 'PRI_tau_pt',
'PRI_lep_1_eta': 'PRI_tau_eta',
'PRI_lep_1_phi': 'PRI_tau_phi',
'PRI_lep_2_pt': 'PRI_lep_pt',
'PRI_lep_2_eta': 'PRI_lep_eta',
'PRI_lep_2_phi': 'PRI_lep_phi',
}
def load_htautau(nrows=None, restricted_cols=True):
url = "http://mlphysics.ics.uci.edu/data/htautau/htautau.txt.gz"
filename = os.path.join(get_data_dir(), "htautau.txt.gz")
maybe_download(filename, url)
if restricted_cols :
data = pd.read_csv(filename, sep='\t', nrows=nrows, header=None, usecols=RESTRICTED_COLUMNS)
else:
data = pd.read_csv(filename, sep='\t', nrows=nrows, header=None)
data.rename(columns=COLUMN_NAMES, inplace=True)
return data
def load_ztautau(nrows=None, restricted_cols=True):
url = "http://mlphysics.ics.uci.edu/data/htautau/ztautau.txt.gz"
filename = os.path.join(get_data_dir(), "ztautau.txt.gz")
maybe_download(filename, url)
if restricted_cols :
data = pd.read_csv(filename, sep='\t', nrows=nrows, header=None, usecols=RESTRICTED_COLUMNS)
else:
data = pd.read_csv(filename, sep='\t', nrows=nrows, header=None)
data.rename(columns=COLUMN_NAMES, inplace=True)
return data
def load_higgstautau(n_samples=None):
if n_samples is None:
data_h = load_htautau()
data_z = load_ztautau()
else:
data_h = load_htautau(nrows=n_samples//2)
data_z = load_ztautau(nrows=n_samples//2)
data_h["Label"] = np.ones(data_h.shape[0])
data_z["Label"] = np.zeros(data_z.shape[0])
data_h["Weight"] = np.ones(data_h.shape[0])/200
data_z["Weight"] = np.ones(data_z.shape[0])
data = pd.concat([data_h, data_z])
return data
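# Illustrative usage sketch (not part of the original module): load a small mixed
# H->tautau / Z->tautau sample; the underlying text files are fetched by
# maybe_download if they are not already cached. n_samples is an arbitrary choice.
def _example_load_higgstautau():
    data = load_higgstautau(n_samples=1000)
    signal_fraction = data["Label"].mean()  # ~0.5 since the sample is split evenly
    return data.shape, signal_fraction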
# ==================================================================================
# V4 Class and physic computations
# ==================================================================================
class V4:
"""
A simple 4-vector class to ease calculation
"""
px=0
py=0
pz=0
e=0
def __init__(self,apx=0., apy=0., apz=0., ae=0.):
"""
Constructor with 4 coordinates
"""
self.px = apx
self.py = apy
self.pz = apz
self.e = ae
if self.e + 1e-3 < self.p():
raise ValueError("Energy is too small! Energy: {}, p: {}".format(self.e, self.p()))
def copy(self):
return copy.deepcopy(self)
def p2(self):
return self.px**2 + self.py**2 + self.pz**2
def p(self):
return np.sqrt(self.p2())
def pt2(self):
return self.px**2 + self.py**2
def pt(self):
return np.sqrt(self.pt2())
def m(self):
return np.sqrt( np.abs( self.e**2 - self.p2() ) ) # abs is needed for protection
def eta(self):
return np.arcsinh( self.pz/self.pt() )
def phi(self):
return np.arctan2(self.py, self.px)
def deltaPhi(self, v):
"""delta phi with another v"""
return (self.phi() - v.phi() + 3*np.pi) % (2*np.pi) - np.pi
def deltaEta(self,v):
"""delta eta with another v"""
return self.eta()-v.eta()
def deltaR(self,v):
"""delta R with another v"""
return np.sqrt(self.deltaPhi(v)**2+self.deltaEta(v)**2 )
def eWithM(self,m=0.):
"""recompute e given m"""
return np.sqrt(self.p2()+m**2)
# FIXME this gives ugly prints with 1D-arrays
def __str__(self):
return "PxPyPzE( %s,%s,%s,%s)<=>PtEtaPhiM( %s,%s,%s,%s) " % (self.px, self.py,self.pz,self.e,self.pt(),self.eta(),self.phi(),self.m())
def scale(self,factor=1.): # scale
"""Apply a simple scaling"""
self.px *= factor
self.py *= factor
self.pz *= factor
self.e = np.abs( factor*self.e )
def scaleFixedM(self,factor=1.):
"""Scale (keeping mass unchanged)"""
m = self.m()
self.px *= factor
self.py *= factor
self.pz *= factor
self.e = self.eWithM(m)
def setPtEtaPhiM(self, pt=0., eta=0., phi=0., m=0):
"""Re-initialize with : pt, eta, phi and m"""
self.px = pt*np.cos(phi)
self.py = pt*np.sin(phi)
self.pz = pt*np.sinh(eta)
self.e = self.eWithM(m)
def sum(self, v):
"""Add another V4 into self"""
self.px += v.px
self.py += v.py
self.pz += v.pz
self.e += v.e
def __iadd__(self, other):
"""Add another V4 into self"""
try:
self.px += other.px
self.py += other.py
self.pz += other.pz
self.e += other.e
except AttributeError:
# If 'other' is not V4 like object then return special NotImplemented error
return NotImplemented
return self
def __add__(self, other):
"""Add 2 V4 vectors : v3 = v1 + v2 = v1.__add__(v2)"""
copy = self.copy()
try:
copy.px += other.px
copy.py += other.py
copy.pz += other.pz
copy.e += other.e
except AttributeError:
# If 'other' is not V4 like object then return special NotImplemented error
return NotImplemented
return copy
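# Illustrative usage sketch (not part of the original module): build two 4-vectors
# from (pt, eta, phi, m), add them and read back the invariant mass of the pair
# and the deltaR separation. Numerical values are arbitrary.
def _example_v4():
    tau = V4()
    tau.setPtEtaPhiM(40., 0.5, 0.1, 0.8)
    lep = V4()
    lep.setPtEtaPhiM(30., -0.3, 2.0, 0.)
    pair = tau + lep
    return pair.m(), tau.deltaR(lep)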
# magic variable
# FIXME : does it really return sqrt(2) if in dead center ?
def METphi_centrality(aPhi, bPhi, cPhi):
"""
Calculate the phi centrality score for an object to be between two other objects in phi
Returns sqrt(2) if in dead center
Returns smaller than 1 if an object is not between
a and b are the bounds, c is the vector to be tested
"""
# Safely compute and set to zeros results of zero divisions
with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
A = np.true_divide(np.sin(cPhi - aPhi), np.sin(bPhi - aPhi))
A[A == np.inf] = 0
A = np.nan_to_num(A)
B = np.true_divide(np.sin(bPhi - cPhi), np.sin(bPhi - aPhi))
B[B == np.inf] = 0
B = np.nan_to_num(B)
res = (A+B)/np.sqrt(A**2 + B**2)
res[res == np.inf] = 0
res = np.nan_to_num(res)
return res
# another magic variable
def eta_centrality(eta, etaJ1, etaJ2):
"""
Calculate the eta centrality score for an object to be between two other objects in eta
Returns 1 if in dead center
Returns value smaller than 1/e if object is not between
"""
center = (etaJ1 + etaJ2) / 2.
# Safely compute and set to zeros results of zero divisions
with np.errstate(divide='ignore', invalid='ignore'):
width = 1. / (etaJ1 - center)**2
width[width == np.inf] = 0
width = np.nan_to_num(width)
return np.exp(-width * (eta - center)**2)
# ==================================================================================
# Now we enter in the manipulation procedures (everything works on data inplace)
# ==================================================================================
def label_to_float(data):
"""
Transform the string labels to float values.
s -> 1.0
b -> 0.0
Works inplace on the given data !
Args
----
data: the dataset should be a pandas.DataFrame like object.
This function will modify the given data inplace.
"""
if data['Label'].dtype == object:
#copy entry in human usable form
data["Label"] = (data["Label"] == 's').astype("float")
else:
pass
# ==================================================================================
def getDetailLabel(origWeight, Label, num=True):
"""
Given original weight and label,
return more precise label specifying the original simulation type.
Args
----
origWeight: the original weight of the event
Label : the label of the event (can be {"b", "s"} or {0,1})
num: (default=True) if True use the numeric detail labels
else use the string detail labels. You should prefer numeric labels.
Return
------
detailLabel: the corresponding detail label ("W" is the default if not found)
Note : Could be better optimized but this is fast enough.
"""
# prefer numeric detail label
detail_label_num={
57207:0, # Signal
4613:1,
8145:2,
4610:3,
917703: 105, #Z
5127399:111,
4435976:112,
4187604:113,
2407146:114,
1307751:115,
944596:122,
936590:123,
1093224:124,
225326:132,
217575:133,
195328:134,
254338:135,
2268701:300 #T
}
# complementary for W detaillabeldict=200
#previous alphanumeric detail label
    detail_label_str={
57207:"S0",
4613:"S1",
8145:"S2",
4610:"S3",
917703:"Z05",
5127399:"Z11",
4435976:"Z12",
4187604:"Z13",
2407146:"Z14",
1307751:"Z15",
944596:"Z22",
936590:"Z23",
1093224:"Z24",
225326:"Z32",
217575:"Z33",
195328:"Z34",
254338:"Z35",
2268701:"T"
}
if num:
detailLabelDict = detail_label_num
else:
detailLabelDict = detail_label_str
iWeight=int(1e7*origWeight+0.5)
detailLabel = detailLabelDict.get(iWeight, "W") # "W" is the default value if not found
if detailLabel == "W" and (Label != 0 and Label != 'b') :
raise ValueError("ERROR! if not in detailLabelDict sould have Label==1 ({}, {})".format(iWeight,Label))
return detailLabel
def add_detail_label(data, num=True):
"""
Add a 'detailLabel' column with the detailed labels.
Args
----
data: the dataset should be a pandas.DataFrame like object.
This function will modify the given data inplace.
num: (default=True) if True use the numeric detail labels
else use the string detail labels. You should prefer numeric labels.
"""
if "origWeight" in data.columns:
detailLabel = [getDetailLabel(w, label, num=num) for w, label in zip(data["origWeight"], data["Label"])]
else:
detailLabel = [getDetailLabel(w, label, num=num) for w, label in zip(data["Weight"], data["Label"])]
data["detailLabel"] = detailLabel
# ==================================================================================
def bkg_weight_norm(data, systBkgNorm):
"""
Apply a scaling to the weight.
    Keeps the previous weights in the 'origWeight' column.
Args
----
data: the dataset should be a pandas.DataFrame like object.
This function will modify the given data inplace.
TODO maybe explain why it scales only the background.
"""
# only a weight manipulation
data["origWeight"] = data["Weight"]
if not "detailLabel" in data.columns:
add_detail_label(data)
# scale the weight, arbitrary but reasonable value
data["Weight"] = ( data["Weight"]*systBkgNorm ).where(data["detailLabel"] == "W", other=data["origWeight"])
# ==================================================================================
# TES : Tau Energy Scale
# ==================================================================================
def tau_energy_scale(data, systTauEnergyScale):
"""
    Manipulate one primary input, PRI_tau_pt, and recompute the other values accordingly.
Args
----
data: the dataset should be a pandas.DataFrame like object.
This function will modify the given data inplace.
systTauEnergyScale : the factor applied : PRI_tau_pt <-- PRI_tau_pt * systTauEnergyScale
Notes :
-------
Add 'ORIG_mass_MMC' and 'ORIG_sum_pt' columns.
Recompute :
- PRI_tau_pt
- PRI_met
- PRI_met_phi
- DER_mass_transverse_met_lep
- DER_mass_vis
- DER_pt_h
- DER_deltar_tau_lep
- DER_pt_ratio_lep_tau
- DER_met_phi_centrality
Round up to 3 decimals.
"""
# scale tau energy scale, arbitrary but reasonable value
data["PRI_tau_pt"] *= systTauEnergyScale
# now recompute the DER quantities which are affected
# first built 4-vectors
vtau = V4() # tau 4-vector
vtau.setPtEtaPhiM(data["PRI_tau_pt"], data["PRI_tau_eta"], data["PRI_tau_phi"], 0.8) # tau mass 0.8 like in original
vlep = V4() # lepton 4-vector
vlep.setPtEtaPhiM(data["PRI_lep_pt"], data["PRI_lep_eta"], data["PRI_lep_phi"], 0.) # lep mass 0 (either 0.106 or 0.0005 but info is lost)
vmet = V4() # met 4-vector
vmet.setPtEtaPhiM(data["PRI_met"], 0., data["PRI_met_phi"], 0.) # met mass zero,
# fix MET according to tau pt change
vtauDeltaMinus = vtau.copy()
vtauDeltaMinus.scaleFixedM( (1.-systTauEnergyScale)/systTauEnergyScale )
vmet += vtauDeltaMinus
vmet.pz = 0.
vmet.e = vmet.eWithM(0.)
data["PRI_met"] = vmet.pt()
data["PRI_met_phi"] = vmet.phi()
# compute many vector sum
vtransverse = V4()
vtransverse.setPtEtaPhiM(vlep.pt(), 0., vlep.phi(), 0.) # just the transverse component of the lepton
vtransverse += vmet
data["DER_mass_transverse_met_lep"] = vtransverse.m()
vltau = vlep + vtau # lep + tau
data["DER_mass_vis"] = vltau.m()
vlmet = vlep + vmet # lep + met # Seems to be unused ?
vltaumet = vltau + vmet # lep + tau + met
data["DER_pt_h"] = vltaumet.pt()
data["DER_deltar_tau_lep"] = vtau.deltaR(vlep)
data["DER_pt_ratio_lep_tau"] = vlep.pt()/vtau.pt()
data["DER_met_phi_centrality"] = METphi_centrality(data["PRI_lep_phi"], data["PRI_tau_phi"], data["PRI_met_phi"])
# delete non trivial objects to save memory (useful?)
# del vtau, vlep, vmet, vlmet, vltau, vltaumet
# Fix precision to 3 decimals
DECIMALS = 3
data["PRI_tau_pt"] = data["PRI_tau_pt"].round(decimals=DECIMALS)
data["PRI_tau_eta"] = data["PRI_tau_eta"].round(decimals=DECIMALS)
data["PRI_tau_phi"] = data["PRI_tau_phi"].round(decimals=DECIMALS)
data["PRI_lep_pt"] = data["PRI_lep_pt"].round(decimals=DECIMALS)
data["PRI_lep_eta"] = data["PRI_lep_eta"].round(decimals=DECIMALS)
data["PRI_lep_phi"] = data["PRI_lep_phi"].round(decimals=DECIMALS)
data["PRI_met"] = data["PRI_met"].round(decimals=DECIMALS)
data["PRI_met_phi"] = data["PRI_met_phi"].round(decimals=DECIMALS)
data["DER_mass_transverse_met_lep"] = data["DER_mass_transverse_met_lep"].round(decimals=DECIMALS)
data["DER_mass_vis"] = data["DER_mass_vis"].round(decimals=DECIMALS)
data["DER_pt_h"] = data["DER_pt_h"].round(decimals=DECIMALS)
data["DER_deltar_tau_lep"] = data["DER_deltar_tau_lep"].round(decimals=DECIMALS)
data["DER_pt_ratio_lep_tau"] = data["DER_pt_ratio_lep_tau"].round(decimals=DECIMALS)
data["DER_met_phi_centrality"] = data["DER_met_phi_centrality"].round(decimals=DECIMALS)
# ==================================================================================
# MAIN : here is defined the behaviour of this module when run as a script
# ==================================================================================
import argparse
def parse_args():
"""
ArgumentParser.
Return
------
args: the parsed arguments.
"""
# First create a parser with a short description of the program.
# The parser will automatically handle the usual stuff like the --help messages.
parser = argparse.ArgumentParser(
description="Higgs manipulation script. Can be used to produce new dataset with skewed features."
"Handle : Backgroung Weight norm (--Wnorm) and Tau energy scaling (--tes).\n"
"To get the behaviour of the previous version use :\n"
"--Wnorm 1 --tes 1.05 -r --csv -o atlas-higgs-challenge-2014-v2_v2.5_manip.csv")
# ====================================================================================
# Real things here
# ====================================================================================
    parser.add_argument("--quiet", "-q", help="Quiet mode (less verbose output)", action="store_true", dest='quiet')
parser.add_argument("-r", "--restricted",
                        help="Option flag to write only the needed columns instead of keeping all the information. "
                        "Same behavior as the previous version.",
action="store_true", dest='restricted')
parser.add_argument("--csv", help="Option flag to prevent compression into gzip file.",
action="store_true", dest='csv')
parser.add_argument("--float-label", help="Option flag to convert labels to float.",
action="store_true", dest='float_label')
    # parser.add_argument('--Wnorm', help='Background weight norm scale factor', type=float, dest='w_scale')
parser.add_argument('--tes', help='Tau energy scale factor. Reasonable value [0.9, 1.1]', type=float, dest='tes')
parser.add_argument('-i1',
help='the name of the first input file', dest="in_file1")
parser.add_argument('-i2',
help='the name of the second input file', dest="in_file2")
parser.add_argument('-o', default="data/Higgs_output.csv",
help='the name of the output file', dest="out_file")
# Now do your job and parse my command line !
args = parser.parse_args()
return args
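# Example invocation (illustrative; the input/output file names are hypothetical):
#   python higgstautau.py --tes 1.05 -r --csv -i1 signal.csv -i2 background.csv -o higgs_manip.csv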
if __name__ == '__main__':
args = parse_args()
quiet = args.quiet # quiet flag
restricted = args.restricted # restricted flag
float_label = args.float_label # label to float flag
csv = args.csv # csv flag
#w_scale = args.w_scale # Weight scaling factor
tes = args.tes # Tau energy scale factor
in_file1 = args.in_file1 # first input file
in_file2 = args.in_file2 # second input file
out_file = args.out_file # output file
# Restrict the column output (for compatibility with previous version)
columns = [ "EventId",
"Label",
"Weight",
"PRI_tau_pt",
"PRI_tau_eta",
"PRI_tau_phi",
"PRI_lep_pt",
"PRI_lep_eta",
"PRI_lep_phi",
"PRI_met",
"PRI_met_phi",
#"DER_mass_MMC",
"DER_mass_transverse_met_lep",
"DER_mass_vis",
"DER_pt_h",
#"DER_deltaeta_jet_jet",
#"DER_mass_jet_jet",
#"DER_prodeta_jet_jet",
#"DER_deltar_tau_lep",
#"DER_pt_tot",
#"DER_sum_pt",
"DER_pt_ratio_lep_tau",
"DER_met_phi_centrality",
#"DER_lep_eta_centrality",
#"PRI_met_sumet",
#"PRI_jet_num",
#"PRI_jet_leading_pt",
#"PRI_jet_leading_eta",
#"PRI_jet_leading_phi",
#"PRI_jet_subleading_pt",
#"PRI_jet_subleading_eta",
#"PRI_jet_subleading_phi",
#"PRI_jet_all_pt",
#"Weight",
#"origWeight",
#"detailLabel",
] if restricted else None
if not quiet:
print("Loading the dataset")
data1 = load_higgs(in_file1)
    #print ("DATA 1 as read")
print (data1)
data2 = load_higgs(in_file2)
data1["Label"] = add_Label1(data1)
    #print ("DATA 1 after")
#print (data1)
data2["Label"] = add_Label2(data2)
data1["Weight"] = add_Weight1(data1)
data2["Weight"] = add_Weight2(data2)
if float_label:
if not quiet:
            print("Label conversion to float")
label_to_float(data1)
label_to_float(data2)
#if w_scale is not None:
# if not quiet:
# print("Weight rescaling :", w_scale)
# bkg_weight_norm(data, w_scale)
if tes is not None:
if not quiet:
print("Tau energy rescaling :", tes)
tau_energy_scale(data1, tes)
tau_energy_scale(data2, tes)
#print ("DATA 1")
#print (data1)
#print ("DATA 2")
#print (data2)
frames = [data1,data2]
data = pd.concat(frames)
#print (data)
from random import sample
# given data frame data, create random index
import random
random.seed(3141526)
rindex = np.array(sample(xrange(len(data1)), len(data1)))
#print (rindex)
print (len(rindex))
data_randomised = data.ix[rindex]
data_randomised["EventId"] = add_EventId(data_randomised)
compression = None if csv else "gzip"
_, ext = os.path.splitext(out_file)
if csv and ext != ".csv":
out_file += ".csv"
elif not csv and ext != ".gz":
out_file += ".gz"
if not quiet:
        print("Writing results to:", out_file)
#data.to_csv(out_file, index=False, compression=compression, columns=columns)
data_randomised.to_csv(out_file, index=False, compression=compression, columns=columns)
print("Done.")
| {
"content_hash": "adcc446a47e71454b8a2179bd4ea3a38",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 142,
"avg_line_length": 34.729651162790695,
"alnum_prop": 0.5682179626684524,
"repo_name": "victor-estrade/datawarehouse",
"id": "e7f64c66368c013525fea5e0d5fcc25193b78476",
"size": "23941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datawarehouse/higgstautau.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61311"
}
],
"symlink_target": ""
} |
import __builtin__
import gdbm
import errno
import hashlib
import os
import os.path
import unittest
import sys
from mock import call, MagicMock, mock_open, patch
from StringIO import StringIO
from shardpile import HashDb
class FilesMock(object):
class File(object):
content = 'content'
def __init__(self, mtime, size):
self.mtime = mtime
self.size = size
def __init__(self):
self.files = {}
self.relpath = os.path.relpath
self.path = patch('os.path')
self.open = patch('__builtin__.open', mock_open(), create=True)
def __enter__(self):
self.open.start()
__builtin__.open = MagicMock(side_effect=self.mock_file)
self.path.start()
os.path.exists = MagicMock(side_effect=self.exists)
os.path.getmtime = MagicMock(side_effect=self.getmtime)
os.path.getsize = MagicMock(side_effect=self.getsize)
os.path.isfile = MagicMock(side_effect=self.isfile)
os.path.join = MagicMock(side_effect=self.join)
os.path.relpath = self.relpath
return self
def __exit__(self, type, value, traceback):
self.path.stop()
self.open.stop()
def add_file(self, path, mtime, size):
self.files[path] = self.File(mtime, size)
def mock_file(self, filename, mode):
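        # The mocked file returns the fake content on the first read() and then
        # swaps its side effect so that later reads return '', mimicking
        # end-of-file for code that reads in a loop.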
def read(size):
file_mock.read.side_effect = lambda size: ''
return self.File.content
file_mock = MagicMock()
file_mock.__enter__.return_value = file_mock
file_mock.read.side_effect = read
return file_mock
def exists(self, path):
return path in self.files
def getmtime(self, path):
try:
return self.files[path].mtime
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
def getsize(self, path):
try:
return self.files[path].size
except KeyError:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path)
def isfile(self, path):
return path in self.files
def join(self, *args):
return '/'.join(args)
class HashDbTest(unittest.TestCase):
def setUp(self):
self.data = {
'path/to/somefile':
'1366207797;1024;6cf9224c0ced0affde6832a101676ff656a7cd6f',
'path/to/anotherfile':
'1366207797;1024;040f06fd774092478d450774f5ba30c5da78acc8'
}
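        # Each stored value encodes "modification_time;size;sha1", which HashDb
        # is expected to expose as Entry(modification, size, sha1) below.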
def data_delitem(key):
del self.data[key]
def data_getitem(key):
return self.data[key]
def data_setitem(key, value):
self.data[key] = value
def data_nextkey(key):
key_iter = iter(self.data)
while key_iter.next() != key:
pass
return key_iter.next()
self.dbmock = MagicMock()
self.dbmock.__delitem__.side_effect = data_delitem
self.dbmock.__getitem__.side_effect = data_getitem
self.dbmock.__setitem__.side_effect = data_setitem
self.dbmock.__len__.side_effect = lambda: len(self.data)
self.dbmock.firstkey.side_effect = lambda: self.data.iterkeys().next()
self.dbmock.nextkey.side_effect = data_nextkey
self.gdbm_mock = MagicMock(spec_set=gdbm)
self.gdbm_mock.open.return_value = self.dbmock
self.hashdb = HashDb('<filename>', self.gdbm_mock)
def test_can_be_used_in_with(self):
with FilesMock() as files:
files.add_file('./path/to/somefile', 1366207797, 1024)
with HashDb('<filename>', self.gdbm_mock) as db:
db.update_path('.', 'path/to/somefile')
def test_allows_iteration(self):
keys = ['0', '1', '2', None]
self.dbmock.firstkey.side_effect = lambda: keys[0]
self.dbmock.nextkey.side_effect = lambda key: keys[keys.index(key) + 1]
for key in self.hashdb:
self.assertTrue(key in keys, 'key = %s' % key)
def test_has_length(self):
self.assertEqual(len(self.hashdb), len(self.data))
def test_provides_dictionary_interface(self):
entry = self.hashdb['path/to/somefile']
self.assertEqual(entry.modification, 1366207797)
self.assertEqual(entry.size, 1024)
self.assertEqual(
entry.sha1, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
with self.assertRaises(KeyError):
entry = self.hashdb['newpath']
self.hashdb['newpath'] = HashDb.Entry(
12345, 256, '07d307d64e062a0ba2ed725571aecd89f2214232')
self.assertEqual(
self.data['newpath'],
'12345;256;07d307d64e062a0ba2ed725571aecd89f2214232')
def test_allows_deletion_of_entries(self):
del self.hashdb['path/to/somefile']
self.assertFalse('path/to/somefile' in self.hashdb)
def test_inserts_hash_for_new_file(self):
with FilesMock() as files:
files.add_file('./newfile', 123, 42)
self.hashdb.update_path('.', 'newfile')
expected = HashDb.Entry(
123, 42, hashlib.sha1(FilesMock.File.content).hexdigest())
self.assertEqual(self.hashdb['newfile'], expected)
def test_reads_complete_file(self):
with FilesMock() as files:
with patch('__builtin__.open', mock_open()) as open_patch:
chunks = ['con', 'tent', '']
def read_chunk(size):
return chunks.pop(0)
file_mock = MagicMock()
file_mock.__enter__.return_value = file_mock
file_mock.read.side_effect = read_chunk
open_patch.return_value = file_mock
files.add_file('./newfile', 123, 42)
self.hashdb.update_path('.', 'newfile')
expected = HashDb.Entry(
123, 42, hashlib.sha1('content').hexdigest())
self.assertEqual(self.hashdb['newfile'], expected)
def test_updates_hash_if_modification_time_changed(self):
with FilesMock() as files:
files.add_file('./path/to/somefile', 123, 1024)
self.hashdb.update_path('.', 'path/to/somefile')
expected = HashDb.Entry(
123, 1024, hashlib.sha1(FilesMock.File.content).hexdigest())
self.assertEqual(self.hashdb['path/to/somefile'], expected)
def test_updates_hash_if_size_changed(self):
with FilesMock() as files:
files.add_file('./path/to/somefile', 1366207797, 42)
self.hashdb.update_path('.', 'path/to/somefile')
expected = HashDb.Entry(
1366207797, 42, hashlib.sha1(FilesMock.File.content).hexdigest())
self.assertEqual(self.hashdb['path/to/somefile'], expected)
def test_update_uses_relative_path(self):
with FilesMock() as files:
files.add_file('path/to/somefile', 1366207797, 42)
self.hashdb.update_path('path', 'to/somefile')
expected = HashDb.Entry(
1366207797, 42, hashlib.sha1(FilesMock.File.content).hexdigest())
self.assertEqual(self.hashdb['to/somefile'], expected)
def test_does_not_update_hash_if_modification_and_size_unchanged(self):
with FilesMock() as files:
files.add_file('./path/to/somefile', 1366207797, 1024)
self.hashdb.update_path('.', 'path/to/somefile')
expected = HashDb.Entry(
1366207797, 1024, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
self.assertEqual(self.hashdb['path/to/somefile'], expected)
def test_update_path_throws_exception_for_non_existing_files(self):
with FilesMock() as files:
files.add_file('./existent', 1, 1)
with self.assertRaises(OSError) as cm:
self.hashdb.update_path('.', 'nonexistent')
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_can_update_all_paths_in_tree(self):
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [
(dirpath, ['a', 'b'], ['file0', 'file1']),
(os.path.join(dirpath, 'a'), [], ['file2']),
(os.path.join(dirpath, 'b'), [], ['file3', 'file4'])]
with patch.object(self.hashdb, 'update_path') as update_path:
self.hashdb.update_tree(dirpath)
expected = [call(dirpath, f) for f in [
'file0', 'file1', os.path.join('a', 'file2'),
os.path.join('b', 'file3'), os.path.join('b', 'file4')]]
self.assertEqual(
len(update_path.call_args_list), len(expected))
for c in update_path.call_args_list:
self.assertIn(c, expected)
def test_can_exclude_patterns_in_update_tree(self):
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [
(dirpath, ['a', 'b'], ['file0', 'exclude1']),
(os.path.join(dirpath, 'a'), [], ['exclude2']),
(os.path.join(dirpath, 'b'), [], ['file3', 'file4'])]
with patch.object(self.hashdb, 'update_path') as update_path:
self.hashdb.update_tree(dirpath, exclude=r'exclude\d')
expected = [call(dirpath, f) for f in [
'file0', os.path.join('b', 'file3'),
os.path.join('b', 'file4')]]
self.assertEqual(
len(update_path.call_args_list), len(expected))
for c in update_path.call_args_list:
self.assertIn(c, expected)
def test_update_all_prints_walk_errors_and_continues(self):
with patch('os.walk') as walk:
dirpath = 'dir'
def test_error_handler(path, onerror):
with patch('sys.stderr') as stderr:
buffer = StringIO()
stderr.write = buffer.write
onerror(OSError(
errno.EPERM, 'Permission denied.', 'somedir'))
self.assertEqual(
buffer.getvalue(),
sys.argv[0] + ": somedir: Permission denied.\n")
return [(dirpath, [], [])]
walk.side_effect = test_error_handler
self.hashdb.update_tree(dirpath)
def test_update_all_prints_update_path_errors_and_continues(self):
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [(dirpath, [], ['file'])]
with patch('sys.stderr') as stderr:
buffer = StringIO()
stderr.write = buffer.write
with patch.object(self.hashdb, 'update_path') as update_path:
update_path.side_effect = OSError(
errno.EPERM, 'Permission denied.', 'file')
self.hashdb.update_tree(dirpath)
self.assertEqual(
buffer.getvalue(),
sys.argv[0] + ": file: Permission denied.\n")
def test_strip_deletes_hashes_for_nonexistent_files(self):
with FilesMock() as files:
files.add_file('path/to/somefile', 123, 1024)
self.hashdb.strip()
self.assertIn('path/to/somefile', self.hashdb)
self.assertNotIn('path/to/anotherfile', self.hashdb)
def test_verify_tree(self):
self.data['path/to/missingOnDisk'] = \
'1366207797;1024;040f06fd774092478d450774f5ba30c5da78acc8'
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [
(dirpath, ['path'], []),
(os.path.join(dirpath, 'path'), ['to'], ['missingInDb']),
(os.path.join(dirpath, 'path', 'to'), [], ['somefile']),
(os.path.join(dirpath, 'path', 'to'), [], ['anotherfile'])]
with FilesMock() as files:
files.add_file('dir/path/to/somefile', 1, 1)
files.add_file('dir/path/to/anotherfile', 1, 1)
changed, missing_in_db, missing_on_disk = \
self.hashdb.verify_tree(dirpath)
self.assertEqual(changed, ['path/to/somefile'])
self.assertEqual(missing_in_db, ['path/missingInDb'])
self.assertEqual(missing_on_disk, ['path/to/missingOnDisk'])
def test_verify_tree_can_exclude_patterns(self):
self.data['path/to/exclude3'] = \
'1366207797;1024;040f06fd774092478d450774f5ba30c5da78acc8'
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [
(dirpath, ['path'], []),
(os.path.join(dirpath, 'path'), ['to'], ['exclude1']),
(os.path.join(dirpath, 'path', 'to'), [], ['exclude2']),
(os.path.join(dirpath, 'path', 'to'), [], ['anotherfile'])]
with FilesMock() as files:
files.add_file('dir/path/to/exclude2', 1, 1)
files.add_file('dir/path/to/anotherfile', 1, 1)
changed, missing_in_db, missing_on_disk = \
self.hashdb.verify_tree(
dirpath, exclude=r'exclude\d|somefile')
self.assertEqual(changed, [])
self.assertEqual(missing_in_db, [])
self.assertEqual(missing_on_disk, [])
def test_verify_tree_prints_walk_errors_and_continues(self):
with patch('os.walk') as walk:
dirpath = 'dir'
def test_error_handler(path, onerror):
with patch('sys.stderr') as stderr:
buffer = StringIO()
stderr.write = buffer.write
onerror(OSError(
errno.EPERM, 'Permission denied.', 'somedir'))
self.assertEqual(
buffer.getvalue(),
sys.argv[0] + ": somedir: Permission denied.\n")
return [(dirpath, [], [])]
walk.side_effect = test_error_handler
self.hashdb.verify_tree(dirpath)
def test_verify_tree_prints_raised_errors_and_continues(self):
with patch('os.walk') as walk:
dirpath = 'dir'
walk.return_value = [(dirpath, [], ['file'])]
with patch('sys.stderr') as stderr:
buffer = StringIO()
stderr.write = buffer.write
with patch('os.path.join') as path_join:
path_join.side_effect = OSError(
errno.EPERM, 'Permission denied.', 'file')
self.hashdb.verify_tree(dirpath)
self.assertTrue(buffer.getvalue().startswith(
sys.argv[0] + ": file: Permission denied.\n"))
class HashDbEntryTest(unittest.TestCase):
def test_equal_entries_considered_equal(self):
a = HashDb.Entry(123, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
b = HashDb.Entry(123, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
self.assertEqual(a, b)
def test_entries_differing_in_modification_time_are_unequal(self):
a = HashDb.Entry(123, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
b = HashDb.Entry(23, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
self.assertNotEqual(a, b)
def test_entries_differing_in_size_are_unequal(self):
a = HashDb.Entry(123, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
b = HashDb.Entry(123, 23, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
self.assertNotEqual(a, b)
def test_entries_differing_in_sha1_are_unequal(self):
a = HashDb.Entry(123, 456, '6cf9224c0ced0affde6832a101676ff656a7cd6f')
b = HashDb.Entry(123, 456, '07d307d64e062a0ba2ed725571aecd89f2214232')
self.assertNotEqual(a, b)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "8d181751d5ab8ba7e60de46e1cf2a979",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 79,
"avg_line_length": 39.665829145728644,
"alnum_prop": 0.5682523595363274,
"repo_name": "jgosmann/shardpile",
"id": "4d6818264e23dcc26b6f139316e75713fe7bbd19",
"size": "15810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test-shardpile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23633"
}
],
"symlink_target": ""
} |
import amsoil.core.pluginmanager as pm
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import logging
Base = declarative_base()
class Aggregate(Base):
__tablename__ = 'sa_aggregate'
id = Column(Integer, primary_key=True)
slice_urn = Column(String)
aggregate_url = Column(String)
# Simple class to hold the basis of DB queries against GPO CH Database
# Loads the database table schemas
class CHDatabaseEngine:
# Grab a database engine and pre-fetch table meta-data for all
# Tables we'll be using
def __init__(self):
config = pm.getService('config')
self.db_url = config.get('chrm.db_url')
        # , pool_size=20 seems possible - default is 5. Haven't seen a need to go higher.
self.db = create_engine(self.db_url)
# FIXME: Make this log level a chapi config param
# logging.getLogger('sqlalchemy').setLevel(logging.INFO)
self.session_class = sessionmaker(bind=self.db)
self.metadata = MetaData(self.db)
Base.metadata.create_all(self.db)
self.SLICE_TABLE = Table('sa_slice', self.metadata, autoload=True)
self.SLICE_MEMBER_TABLE = \
Table('sa_slice_member', self.metadata, autoload=True)
self.SLIVER_INFO_TABLE = \
Table('sa_sliver_info', self.metadata, autoload=True)
self.PROJECT_TABLE = Table('pa_project', self.metadata, autoload=True)
self.PROJECT_MEMBER_TABLE = \
Table('pa_project_member', self.metadata, autoload=True)
self.PROJECT_ATTRIBUTE_TABLE = \
Table('pa_project_attribute', self.metadata, autoload=True)
self.MEMBER_TABLE = \
Table('ma_member', self.metadata, autoload=True)
self.MEMBER_ATTRIBUTE_TABLE = \
Table('ma_member_attribute', self.metadata, autoload=True)
self.SSH_KEY_TABLE = \
Table('ma_ssh_key', self.metadata, autoload=True)
self.OUTSIDE_CERT_TABLE = \
Table('ma_outside_cert', self.metadata, autoload=True)
self.INSIDE_KEY_TABLE = \
Table('ma_inside_key', self.metadata, autoload=True)
self.CS_ACTION_TABLE = Table('cs_action', self.metadata, \
autoload=True)
self.CS_ATTRIBUTE_TABLE = Table('cs_attribute', self.metadata, \
autoload=True)
self.CS_POLICY_TABLE = Table('cs_policy', self.metadata, \
autoload=True)
self.SERVICES_TABLE = \
Table('service_registry', self.metadata, autoload=True)
self.SERVICE_ATTRIBUTE_TABLE = \
Table('service_registry_attribute', self.metadata, autoload=True)
self.ROLE_TABLE = Table('cs_attribute', self.metadata, autoload=True)
self.LOGGING_ENTRY_TABLE = Table('logging_entry', \
self.metadata, autoload=True)
self.LOGGING_ENTRY_ATTRIBUTE_TABLE = \
Table('logging_entry_attribute', \
self.metadata, autoload=True)
self.MA_CLIENT_TABLE = Table('ma_client', self.metadata, autoload=True)
self.PROJECT_REQUEST_TABLE = Table('pa_project_member_request', \
self.metadata, autoload=True)
self.PROJECT_INVITATION_TABLE = Table('pa_project_member_invitation', \
self.metadata, autoload=True)
# Get a new session on the database engine
def getSession(self):
return self.session_class()
| {
"content_hash": "8d53a8564895f141b8d6a5482491edc8",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 90,
"avg_line_length": 45.4,
"alnum_prop": 0.6093061674008811,
"repo_name": "ahelsing/geni-ch",
"id": "ae4978ab176e9728b2e796345537097190f9e445",
"size": "5377",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "plugins/chrm/CHDatabaseEngine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "999"
},
{
"name": "Makefile",
"bytes": "1539"
},
{
"name": "Python",
"bytes": "627979"
},
{
"name": "Shell",
"bytes": "17153"
}
],
"symlink_target": ""
} |
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.fishing import FishGlobals
import GardenGlobals
from direct.actor import Actor
import random
class DirectRegion(NodePath):
notify = DirectNotifyGlobal.directNotify.newCategory('DirectRegion')
def __init__(self, parent=aspect2d):
NodePath.__init__(self)
self.assign(parent.attachNewNode('DirectRegion'))
def destroy(self):
self.unload()
self.parent = None
return
def setBounds(self, *bounds):
self.bounds = bounds
def setColor(self, *colors):
self.color = colors
def show(self):
pass
def hide(self):
NodePath.hide(self)
def load(self):
if not hasattr(self, 'cRender'):
self.cRender = NodePath('fishSwimRender')
self.fishSwimCamera = self.cRender.attachNewNode('fishSwimCamera')
self.cCamNode = Camera('fishSwimCam')
self.cLens = PerspectiveLens()
self.cLens.setFov(40, 40)
self.cLens.setNear(0.1)
self.cLens.setFar(100.0)
self.cCamNode.setLens(self.cLens)
self.cCamNode.setScene(self.cRender)
self.fishSwimCam = self.fishSwimCamera.attachNewNode(self.cCamNode)
cm = CardMaker('displayRegionCard')
apply(cm.setFrame, self.bounds)
self.card = card = self.attachNewNode(cm.generate())
apply(card.setColor, self.color)
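        # Convert the card's tight bounds into render2d coordinates, then
        # rescale them from [-1, 1] into the [0, 1] range expected by
        # base.win.makeDisplayRegion.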
newBounds = card.getTightBounds()
ll = render2d.getRelativePoint(card, newBounds[0])
ur = render2d.getRelativePoint(card, newBounds[1])
newBounds = [ll.getX(),
ur.getX(),
ll.getZ(),
ur.getZ()]
newBounds = map(lambda x: max(0.0, min(1.0, (x + 1.0) / 2.0)), newBounds)
self.cDr = base.win.makeDisplayRegion(*newBounds)
self.cDr.setSort(10)
self.cDr.setClearColor(card.getColor())
self.cDr.setClearDepthActive(1)
self.cDr.setClearColorActive(1)
self.cDr.setCamera(self.fishSwimCam)
return self.cRender
def unload(self):
if hasattr(self, 'cRender'):
base.win.removeDisplayRegion(self.cDr)
del self.cRender
del self.fishSwimCamera
del self.cCamNode
del self.cLens
del self.fishSwimCam
del self.cDr
class SpecialsPhoto(NodePath):
notify = DirectNotifyGlobal.directNotify.newCategory('SpecialsPhoto')
def __init__(self, type = None, parent = aspect2d):
NodePath.__init__(self)
self.assign(parent.attachNewNode('SpecialsPhoto'))
self.type = type
self.actor = None
self.sound = None
self.soundTrack = None
self.track = None
self.specialsFrame = None
return
def destroy(self):
self.hide()
if hasattr(self, 'background'):
self.background.destroy()
del self.background
if hasattr(self, 'specialsFrame') and hasattr(self.specialsFrame, 'destroy'):
self.specialsFrame.destroy()
if hasattr(self, 'toonStatuary'):
if self.toonStatuary.toon:
self.toonStatuary.deleteToon()
self.type = None
del self.soundTrack
del self.track
self.parent = None
return
def update(self, type):
self.type = type
def setBackBounds(self, *bounds):
self.backBounds = bounds
def setBackColor(self, *colors):
self.backColor = colors
def load(self):
pass
def makeSpecialsFrame(self, actor):
actor.setDepthTest(1)
actor.setDepthWrite(1)
if not hasattr(self, 'specialsDisplayRegion'):
self.specialsDisplayRegion = DirectRegion(parent=self)
apply(self.specialsDisplayRegion.setBounds, self.backBounds)
apply(self.specialsDisplayRegion.setColor, self.backColor)
frame = self.specialsDisplayRegion.load()
pitch = frame.attachNewNode('pitch')
rotate = pitch.attachNewNode('rotate')
scale = rotate.attachNewNode('scale')
actor.reparentTo(scale)
bMin, bMax = actor.getTightBounds()
center = (bMin + bMax) / 2.0
actor.setPos(-center[0], -center[1], -center[2])
pitch.setY(2.5)
return frame
def loadModel(self, specialsIndex):
if specialsIndex == -1:
nodePath = self.attachNewNode('blank')
return nodePath
if specialsIndex >= 105 and specialsIndex <= 108:
from toontown.estate import DistributedToonStatuary
self.toonStatuary = DistributedToonStatuary.DistributedToonStatuary(None)
self.toonStatuary.setupStoneToon(base.localAvatar.style)
self.toonStatuary.poseToonFromSpecialsIndex(specialsIndex)
self.toonStatuary.toon.setH(180)
pedestalModelPath = GardenGlobals.Specials[specialsIndex]['photoModel']
pedestal = loader.loadModel(pedestalModelPath)
self.toonStatuary.toon.reparentTo(pedestal)
pedestal.setScale(GardenGlobals.Specials[specialsIndex]['photoScale'] * 0.5)
return pedestal
elif specialsIndex == 135:
model = Actor.Actor()
modelPath = GardenGlobals.Specials[specialsIndex]['photoModel']
anims = GardenGlobals.Specials[specialsIndex]['photoAnimation']
animPath = modelPath + anims[1]
model.loadModel(modelPath + anims[0])
model.loadAnims(dict([[anims[1], animPath]]))
frameNo = random.randint(1, 2)
model.pose(anims[1], 1)
model.setScale(GardenGlobals.Specials[specialsIndex]['photoScale'] * 0.1)
return model
else:
modelName = GardenGlobals.Specials[specialsIndex]['photoModel']
nodePath = loader.loadModel(modelName)
desat = None
colorTuple = (1, 1, 1)
if desat and not desat.isEmpty():
desat.setColorScale(colorTuple[0], colorTuple[1], colorTuple[2], 1.0)
else:
nodePath.setColorScale(colorTuple[0], colorTuple[1], colorTuple[2], 1.0)
nodePath.setScale(GardenGlobals.Specials[specialsIndex]['photoScale'] * 0.5)
return nodePath
return
def show(self, showBackground = 0):
self.notify.debug('show')
messenger.send('wakeup')
if self.specialsFrame:
if hasattr(self.actor, 'cleanup'):
self.actor.cleanup()
if hasattr(self, 'specialsDisplayRegion'):
self.specialsDisplayRegion.unload()
self.hide()
if self.type != -1:
self.actor = self.loadModel(self.type)
self.specialsFrame = self.makeSpecialsFrame(self.actor)
if showBackground:
if not hasattr(self, 'background'):
background = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
background = background.find('**/Fish_BG')
self.background = background
self.background.setPos(0, 15, 0)
self.background.setScale(11)
self.background.reparentTo(self.specialsFrame)
def hide(self):
NodePath.hide(self)
if hasattr(self, 'specialsDisplayRegion'):
self.specialsDisplayRegion.unload()
if hasattr(self, 'background'):
self.background.hide()
if self.actor:
if hasattr(self.actor, 'stop'):
self.actor.stop()
self.actor.hide()
if self.sound:
self.sound.stop()
self.sound = None
if self.soundTrack:
self.soundTrack.pause()
self.soundTrack = None
if self.track:
self.track.pause()
self.track = None
if hasattr(self, 'toonStatuary'):
if self.toonStatuary.toon:
self.toonStatuary.deleteToon()
return
def changeVariety(self, variety):
self.variety = variety
| {
"content_hash": "d4e673f697ecaa5bfdd00b3f105d891f",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 88,
"avg_line_length": 37.199095022624434,
"alnum_prop": 0.6015083323196692,
"repo_name": "linktlh/Toontown-journey",
"id": "0daa135a2ac8e093e76068d847a2057ff41121b5",
"size": "8221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/estate/SpecialsPhoto.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
''' Miscellaneous helpers
I cannot come up with a good grouping at the moment.
'''
from collections import Counter
import operator
def avg(l):
return sum(l) / float(len(l))
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def count_elements(l):
    '''Count the occurrences of each element in a list'''
return Counter(l)
def get_dict_max(d):
'''Returns the key of the item in
d with the highest value'''
return max(d, key=d.get)
def get_dict_min(d):
'''Returns the key of the item in
d with the lowest value'''
return min(d, key=d.get)
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def flatten(l):
return [item for sublist in l for item in sublist]
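# Illustrative self-check of the helpers above, using small arbitrary inputs.
if __name__ == '__main__':
    assert avg([1, 2, 3]) == 2.0
    assert is_number('3.14') and not is_number('pi')
    assert count_elements(['a', 'b', 'a'])['a'] == 2
    assert get_dict_max({'a': 1, 'b': 5}) == 'b'
    assert get_dict_min({'a': 1, 'b': 5}) == 'a'
    assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
    assert flatten([[1, 2], [3]]) == [1, 2, 3]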
| {
"content_hash": "54620b8b7678c19d9ec48798e2796293",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 59,
"avg_line_length": 18.5,
"alnum_prop": 0.6169212690951822,
"repo_name": "qfma/ohnolog-dc",
"id": "f5f8810f6fe4b5e88bdb8d947031cafab3215129",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "467069"
},
{
"name": "Python",
"bytes": "71993"
},
{
"name": "R",
"bytes": "16709"
}
],
"symlink_target": ""
} |
from tensorrec import TensorRec
from tensorrec.eval import fit_and_eval
from tensorrec.representation_graphs import (
LinearRepresentationGraph, ReLURepresentationGraph, NormalizedLinearRepresentationGraph
)
from tensorrec.loss_graphs import WMRBLossGraph, BalancedWMRBLossGraph
from tensorrec.prediction_graphs import (
DotProductPredictionGraph, CosineSimilarityPredictionGraph, EuclideanSimilarityPredictionGraph
)
from tensorrec.util import append_to_string_at_point
from test.datasets import get_movielens_100k
import logging
logging.getLogger().setLevel(logging.INFO)
# Load the movielens dataset
train_interactions, test_interactions, user_features, item_features, _ = get_movielens_100k(negative_value=0)
# Construct parameters for fitting
epochs = 300
alpha = 0.00001
n_components = 10
verbose = True
learning_rate = .01
n_sampled_items = int(item_features.shape[0] * .1)
biased = False
fit_kwargs = {'epochs': epochs, 'alpha': alpha, 'verbose': verbose, 'learning_rate': learning_rate,
'n_sampled_items': n_sampled_items}
res_strings = []
# Build results header
header = "Loss Graph"
header = append_to_string_at_point(header, 'Prediction Graph', 30)
header = append_to_string_at_point(header, 'ItemRepr Graph', 66)
header = append_to_string_at_point(header, 'Biased', 98)
header = append_to_string_at_point(header, 'N Tastes', 108)
header = append_to_string_at_point(header, 'Recall at 30', 120)
header = append_to_string_at_point(header, 'Precision at 5', 141)
header = append_to_string_at_point(header, 'NDCG at 30', 164)
res_strings.append(header)
# Iterate through many possibilities for model configuration
for loss_graph in (WMRBLossGraph, BalancedWMRBLossGraph):
for pred_graph in (DotProductPredictionGraph, CosineSimilarityPredictionGraph,
EuclideanSimilarityPredictionGraph):
for repr_graph in (LinearRepresentationGraph, ReLURepresentationGraph):
for n_tastes in (1, 3):
# Build the model, fit, and get a result packet
model = TensorRec(n_components=n_components,
n_tastes=n_tastes,
biased=biased,
loss_graph=loss_graph(),
prediction_graph=pred_graph(),
user_repr_graph=NormalizedLinearRepresentationGraph(),
item_repr_graph=repr_graph())
result = fit_and_eval(model, user_features, item_features, train_interactions, test_interactions,
fit_kwargs)
# Build results row for this configuration
res_string = "{}".format(loss_graph.__name__)
res_string = append_to_string_at_point(res_string, pred_graph.__name__, 30)
res_string = append_to_string_at_point(res_string, repr_graph.__name__, 66)
res_string = append_to_string_at_point(res_string, biased, 98)
res_string = append_to_string_at_point(res_string, n_tastes, 108)
res_string = append_to_string_at_point(res_string, ": {}".format(result[0]), 118)
res_string = append_to_string_at_point(res_string, result[1], 141)
res_string = append_to_string_at_point(res_string, result[2], 164)
res_strings.append(res_string)
print(res_string)
print('--------------------------------------------------')
for res_string in res_strings:
print(res_string)
| {
"content_hash": "a503abfd0c2c6f6b5860a57b7e0ca8c7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 113,
"avg_line_length": 46.89473684210526,
"alnum_prop": 0.6459034792368126,
"repo_name": "jfkirk/tensorrec",
"id": "662528f0dd135746f0feb24107aca4e515369cf8",
"size": "3564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/check_movielens_losses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161191"
}
],
"symlink_target": ""
} |
import os
import datetime
import zipfile
import re
import shutil
from math import ceil
from logging import getLogger
import nkf
from sqlalchemy import desc
from werkzeug import secure_filename
from db_models.shared_models import db
import common.utils as ds_util
import chardet
logger = getLogger(__name__)
class Dataset(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, unique=True, nullable=False)
dataset_path = db.Column(db.Text, unique=True)
type = db.Column(db.Text)
category_num = db.Column(db.Integer)
file_num = db.Column(db.Integer)
updated_at = db.Column(db.DateTime)
created_at = db.Column(db.DateTime)
models = db.relationship('Model', backref='dataset', lazy='dynamic')
def __init__(self, name, type, dataset_path=None):
self.name = name
self.dataset_path = dataset_path
self.type = type
self.updated_at = datetime.datetime.now()
self.created_at = datetime.datetime.now()
    def __repr__(self):
        return '<Dataset %r>' % self.name
@classmethod
def get_datasets_with_samples(cls, limit=0, offset=0):
if limit == 0:
datasets = cls.query.order_by(desc(Dataset.updated_at))
else:
datasets = cls.query.order_by(desc(Dataset.updated_at)).limit(limit).offset(offset)
ret = []
dirty = False
for dataset in datasets:
if not os.path.exists(dataset.dataset_path):
continue
if dataset.file_num is None:
dataset.file_num = ds_util.count_files(dataset.dataset_path)
dirty = True
if dataset.category_num is None:
dataset.category_num = ds_util.count_categories(dataset.dataset_path)
dirty = True
if dirty:
dataset.update_and_commit()
if dataset.type == 'image':
dataset.thumbnails = []
thumbnails = ds_util.get_images_in_random_order(dataset.dataset_path, 4)
if len(thumbnails) == 0:
continue
for t in thumbnails:
dataset.thumbnails.append('/files/' + str(dataset.id)
+ t.replace(dataset.dataset_path, ''))
elif dataset.type == 'text':
dataset.sample_text = ds_util.get_texts_in_random_order(dataset.dataset_path,
1, 180)
dataset.filesize = ds_util.calculate_human_readable_filesize(
ds_util.get_file_size_all(dataset.dataset_path))
ret.append(dataset)
return ret, cls.query.count()
def get_dataset_with_categories_and_samples(self, limit=20, offset=0):
dataset_root = self.dataset_path
if len(os.listdir(dataset_root)) == 1:
dataset_root = os.path.join(dataset_root, os.listdir(dataset_root)[0])
self.pages = int(ceil(float(self.category_num) / limit))
self.categories = []
for index, p in enumerate(ds_util.find_all_directories(dataset_root)):
if index < offset or offset + limit - 1 < index:
continue
if self.type == 'image':
thumbs = ds_util.get_images_in_random_order(p, 4)
thumbs = map(lambda t: '/files/' + str(self.id) + t.replace(self.dataset_path, ''),
thumbs)
self.categories.append({
'dataset_type': self.type,
'path': p.replace(self.dataset_path, ''),
'file_num': ds_util.count_files(p),
'category': os.path.basename(p),
'thumbnails': thumbs
})
elif self.type == 'text':
self.categories.append({
'dataset_type': self.type,
'path': p.replace(self.dataset_path, ''),
'file_num': ds_util.count_files(p),
'category': os.path.basename(p),
'sample_text': ds_util.get_texts_in_random_order(p, 1, 180)
})
return self
def get_dataset_with_category_detail(self, category, offset=0, limit=100):
category_root = os.path.join(self.dataset_path, category)
self.category = os.path.basename(category_root)
files = []
for i, p in enumerate(ds_util.find_all_files(category_root)):
if i < offset or offset + limit - 1 < i:
continue
if self.type == 'image':
files.append('/files/' + str(self.id) + p.replace(self.dataset_path, ''))
elif self.type == 'text':
files.append({
'sample_text': ds_util.get_text_sample(p, 180),
'text_path': p.replace(self.dataset_path, '')
})
self.files = files
self.count = ds_util.count_files(category_root)
self.pages = int(ceil(float(self.count) / limit))
self.category_root = category_root.replace(self.dataset_path, '')
self.original_category = category
return self
def delete(self):
db.session.delete(self)
try:
shutil.rmtree(self.dataset_path)
except Exception as e:
logger.exception('Could not delete {0}. {1}'.format(self.dataset_path, e))
raise
db.session.commit()
@classmethod
def remove_category(cls, id, category_path):
dataset = cls.query.get(id)
abs_path = os.path.normpath(dataset.dataset_path + category_path)
deleted_file_num = ds_util.count_files(abs_path)
try:
shutil.rmtree(abs_path)
except Exception as e:
logger.exception('Could not delete {0}. {1}'.format(dataset.dataset_path, e))
raise
dataset.category_num -= 1
dataset.file_num -= deleted_file_num
dataset.update_and_commit()
@classmethod
def create_category(cls, id, name):
ds = cls.query.get(id)
target = ds.dataset_path
if len(os.listdir(target)) == 1:
only_one_child = os.listdir(target)[0]
candidate = os.path.join(target, only_one_child)
path_name_sample = ds_util.get_files_in_random_order(candidate, 1)[0]
if os.path.split(path_name_sample)[0] != candidate:
target = candidate
try:
os.mkdir(os.path.join(target, name))
ds.category_num += 1
except Exception as e:
logger.exception('Could not create directory: {0} {1}'
.format(os.path.join(target, name).encode('utf-8'), e))
raise
ds.update_and_commit()
def save_uploaded_data(self, uploaded_file, save_raw_file_to, save_to):
filename = uploaded_file.filename
name, ext = os.path.splitext(filename)
        if ext not in ('.zip',):
raise ValueError('Invalid file type. Only zip file is allowed: ' + filename)
timestamp_str = ds_util.get_timestamp()
new_filename = secure_filename(re.sub(r'\.zip$', '_' + timestamp_str + '.zip', filename))
uploaded_file.save(os.path.join(save_raw_file_to, new_filename))
# extract zip file
extract_to = os.path.join(save_to, timestamp_str)
self.dataset_path = extract_to
try:
os.mkdir(extract_to)
except Exception as e:
logger.exception('Could not create directory to extract zip file: {0} {1}'
.format(extract_to, e))
raise
file_num = 0
category_num = 0
try:
zf = zipfile.ZipFile(os.path.join(save_raw_file_to, new_filename), 'r')
name_list = zf.namelist()
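            # Guess the filename encoding for the whole archive at once so that
            # non-ASCII entry names can be decoded into valid extraction paths.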
encoding = chardet.detect(''.join(name_list)).get('encoding', 'utf-8')
for file_name in (file_name.decode(encoding) for file_name in name_list):
if ('__MACOSX' in file_name) or ('.DS_Store' in file_name):
continue
temp_path = os.path.join(extract_to, file_name)
if not os.path.basename(file_name):
if not os.path.exists(temp_path):
os.makedirs(temp_path.encode(encoding='utf-8'))
category_num += 1
else:
temp, ext = os.path.splitext(temp_path)
ext = ext.lower()
if self.type == 'image':
if ext not in ('.jpg', '.jpeg', '.png', '.gif'):
continue
elif self.type == 'text':
if ext not in ('.txt',):
continue
if os.path.exists(temp_path):
uzf = file(temp_path, 'w+b')
else:
uzf = file(temp_path, 'wb')
uzf.write(zf.read(file_name.encode(encoding=encoding)))
uzf.close()
file_num += 1
except Exception as e:
logger.exception('Could not extract zip file: {0}'.format(e))
raise
finally:
if 'zf' in locals():
zf.close()
if 'uzf' in locals():
uzf.close()
self.category_num = category_num
self.file_num = file_num
self.update_and_commit()
def save_uploaded_file_to_category(self, uploaded_file, category):
filename = uploaded_file.filename
name, ext = os.path.splitext(filename)
ext = ext.lower()
if self.type == 'image':
if ext not in ('.jpg', '.jpeg', '.png', '.gif'):
raise ValueError('Invalid file type.')
elif self.type == 'text':
if ext not in ('.txt',):
raise ValueError('Invalid file type.')
new_filename = os.path.join(self.dataset_path, category,
ds_util.get_timestamp() + '_' + secure_filename(filename))
if self.type == 'image':
uploaded_file.save(new_filename)
elif self.type == 'text':
text = uploaded_file.stream.read()
if nkf.guess(text) == 'binary':
raise ValueError('Invalid file type. File must be a text file.')
f = open(new_filename, 'w')
f.write(text)
f.close()
self.file_num += 1
self.update_and_commit()
def remove_file_from_category(self, target_file):
if self.type == 'image':
target_file = target_file.replace('/files/' + str(self.id) + '/', '')
file_path = os.path.normpath(self.dataset_path + os.sep + target_file)
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
logger.exception("Could not remove file: {0} {1}".format(file_path, e))
raise
self.file_num -= 1
self.update_and_commit()
def get_full_text(self, target_file):
file_path = os.path.join(self.dataset_path, target_file)
text = ds_util.get_text_sample(file_path)
text = text.replace("\r", '')
text = text.replace("\n", '<br>')
return text
def update_dataset_path(self, new_path):
if not os.path.exists(new_path):
logger.error('directory not exists: {}'.format(new_path))
raise Exception('Directory not exists: {}'.format(new_path))
if not os.path.isdir(new_path):
logger.error('not a directory: {}'.format(new_path))
raise Exception('Not a directory: {}'.format(new_path))
self.dataset_path = new_path
self.update_and_commit()
def update_and_commit(self):
self.updated_at = datetime.datetime.now()
db.session.add(self)
db.session.commit()
| {
"content_hash": "0cbc50431248004b708e70f94b530080",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 99,
"avg_line_length": 42.00701754385965,
"alnum_prop": 0.5364183093885734,
"repo_name": "SonyCSL/CSLAIER",
"id": "026cba8ba92e40b1ba760085a44b5e632da84127",
"size": "11998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/db_models/datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8380"
},
{
"name": "HTML",
"bytes": "68445"
},
{
"name": "JavaScript",
"bytes": "432469"
},
{
"name": "Python",
"bytes": "175757"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
} |
'''
This relies on font2.py, rgb_text2.py and ili9341_text2.py to display info on the TFT FeatherWing
Uses umqtt_client_official.py
The mqtt topic is determined by the config file but previously was hardcoded as 'esp_tft'
The format of the mqtt messages is:
{"header":"Weather", "text":"Some text goes here", "pos":2}
My thought is to display all messages at the top of the display, so the pos(ition) doesn't matter.
Note you must transfer config, mqtt_id and location to the esp8266 (e.g., using ampy)
'''
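# A message in that format could be published from a desktop, e.g. with
# paho-mqtt (illustrative only; note the callback below iterates over 'text',
# so sending it as a list of lines is the safest form):
#   import json
#   import paho.mqtt.publish as publish
#   publish.single(topic, json.dumps(
#       {"header": "Weather", "text": ["Sunny and 72F"], "pos": 2}), hostname=host)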
import machine
from time import sleep, time
import json
import network
from config import ssid, pw, topic
from config import mqtt_aws_host as host
import ili9341_text2 as ili
from umqtt_client_official import MQTTClient as umc
with open('mqtt_id', 'r') as f:
mqtt_id = f.read().strip()
print("mqtt_id =", mqtt_id)
print("host =", host)
spi = machine.SPI(1, baudrate=32000000)
d = ili.ILI9341(spi, cs=machine.Pin(0), dc=machine.Pin(15))
d.fill(0)
d.draw_text(0, 0, "Hello Steve", ili.color565(255,255,255))
c = umc(mqtt_id, host, 1883)
def wrap(text,lim):
lines = []
pos = 0
line = []
for word in text.split():
if pos + len(word) < lim + 1:
line.append(word)
pos+= len(word) + 1
else:
lines.append(' '.join(line))
line = [word]
pos = len(word)
lines.append(' '.join(line))
return lines
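# For example, wrap("hello there world", 11) returns ['hello there', 'world'].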
def run():
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
print('connecting to network...')
wlan.connect(ssid, pw)
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
def callback(topic, msg):
zz = json.loads(msg.decode('utf-8'))
d.fill_rectangle(0, 0, 240, 320, 0) # erase before writing new info
d.draw_text(0, 0, zz.get('header', "No header"), ili.color565(0,255,0))
y = 0
for line in zz.get('text', ["No text"]):
lines = wrap(line, 26)
for line in lines:
y+=15
d.draw_text(0, y, line, ili.color565(255,255,255))
r = c.connect()
print("connect:",r)
c.set_callback(callback)
r = c.subscribe(topic)
print("subscribe:",r)
sleep(5)
cur_time = time()
while 1:
c.check_msg()
t = time()
if t > cur_time + 30:
c.ping()
cur_time = t
sleep(1)
run()
| {
"content_hash": "f9a686ea830027958d93b63488d269e8",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 97,
"avg_line_length": 24.752688172043012,
"alnum_prop": 0.6355343179843614,
"repo_name": "slzatz/esp8266",
"id": "9a4641196a6a59fda12b1d234dec2a6d6d2787a3",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esp_display_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122427"
}
],
"symlink_target": ""
} |
""" This web server will handle the training of the TensorFlow model and the image sets
that will be used for training. """
import os
from stat import *
from flask import Flask, request, redirect, url_for, flash, send_from_directory, json
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = './static/uploads'
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'png', 'bmp'])
APP = Flask(__name__, static_url_path='/static')
APP.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
""" Check if the attached file contains the valid image extensions defined
in ALLOWED_EXTENSIONS """
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@APP.route('/')
def hello_world():
""" Index page. TODO: Replace with a dashboard or something to help manage the server """
return APP.send_static_file('index.html')
@APP.route('/<path:path>')
def static_file(path):
""" Serve static files inside the static folder. """
return APP.send_static_file(path)
@APP.route('/images', methods=['GET', 'POST'])
def images():
""" Handles the training set images
GET: Displays the image upload page
POST: Upload the given image to store under the UPLOAD_FOLDER. """
if request.method == 'GET':
# Display file upload form
return APP.send_static_file('upload.html')
if request.method == 'POST':
# Upload the images attached to the request under the set_name folder
if 'file' not in request.files:
flash('No files attached')
return redirect(request.url)
img = request.files['file']
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
if img.filename == '':
flash('No file selected')
return redirect(request.url)
if img and allowed_file(img.filename):
filename = secure_filename(img.filename)
directory = APP.config['UPLOAD_FOLDER']
# create the file path if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# save the image in the directory
img.save(os.path.join(directory, filename))
return redirect('/images')
# Check if the model has an update
@APP.route('/push-model-update')
def push_model_update():
""" Send the model file to the client. """
return send_from_directory('tensorflow/examples/android/assets', \
'tensorflow_inception_graph.pb')
@APP.route('/push-label-update')
def push_label_update():
    """ Send the label file to the client. """
return send_from_directory('tensorflow/examples/android/assets', \
'imagenet_comp_graph_label_strings.txt')
@APP.route('/check-version')
def check_version():
""" Check the metadata of the model file to see if there is a new
version available. """
update_available = False
client_mod_time = int(request.args.get('time-modified'))
client_size = int(request.args.get('size'))
file_info = os.stat('tensorflow/examples/android/assets/tensorflow_inception_graph.pb')
size = file_info[ST_SIZE]
mod_time = file_info[ST_MTIME]
print 'client mod time: {0}, client size: {1}'.format(client_mod_time, client_size)
print 'server mod time: {0}, server size: {1}'.format(mod_time, size)
# compare the last modified time first
if client_mod_time < mod_time:
print 'client mod time is older than server mod time'
update_available = True
return APP.response_class(
response=json.dumps(update_available),
status=200,
mimetype='application/json'
)
if client_size != size:
print 'client size is not the same as server size'
update_available = True
return APP.response_class(
response=json.dumps(update_available),
status=200,
mimetype='application/json'
)
print 'no update available'
return APP.response_class(
response=json.dumps(update_available),
status=200,
mimetype='application/json'
)
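# Example client query (illustrative; the host and port depend on how the app is run):
#   curl 'http://localhost:5000/check-version?time-modified=1490000000&size=12345'
# The response body is a JSON boolean indicating whether an update is available.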
| {
"content_hash": "ec93bfe658d40b53e080dc137f09c86a",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 93,
"avg_line_length": 35.29059829059829,
"alnum_prop": 0.645919108743037,
"repo_name": "odejesush/tensorflow",
"id": "9b1a747372d7f405968331ecf3a8e310493ef435",
"size": "4151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "177136"
},
{
"name": "C++",
"bytes": "20597014"
},
{
"name": "CMake",
"bytes": "120039"
},
{
"name": "CSS",
"bytes": "7005"
},
{
"name": "Go",
"bytes": "103991"
},
{
"name": "HTML",
"bytes": "551211"
},
{
"name": "Java",
"bytes": "209382"
},
{
"name": "JavaScript",
"bytes": "20077"
},
{
"name": "Jupyter Notebook",
"bytes": "1833831"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32007"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "187378"
},
{
"name": "Python",
"bytes": "20689461"
},
{
"name": "Shell",
"bytes": "325470"
},
{
"name": "TypeScript",
"bytes": "765149"
}
],
"symlink_target": ""
} |
import json
import yaml
from kpm.commands.deploy import DeployCmd
class GenerateCmd(DeployCmd):
name = 'generate'
help_message = "Generate a package json"
def _call(self):
k = self.kub()
if k.target == "docker-compose":
self.output = 'yaml'
self._generate()
def _generate(self):
k = self.kub()
filename = "%s_%s.tar.gz" % (k.name.replace("/", "_"), k.version)
with open(filename, 'wb') as f:
f.write(k.build_tar("."))
def _render_json(self):
print json.dumps(self.kub().build(), indent=2, separators=(',', ': '))
def _render_yaml(self):
print yaml.safe_dump(self.kub().build())
def _render_console(self):
self._render_json()
| {
"content_hash": "2ae3f58dfc3b734e28572cbaf8dbc2b0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 26.06896551724138,
"alnum_prop": 0.5621693121693122,
"repo_name": "ant31/kpm",
"id": "b7f2a3c5ecab07adb4e4deef8ccdc6c739fc3e13",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kpm/commands/generate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6288"
},
{
"name": "HTML",
"bytes": "21178"
},
{
"name": "JavaScript",
"bytes": "19999"
},
{
"name": "Makefile",
"bytes": "3016"
},
{
"name": "Python",
"bytes": "153218"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
"""
Unit test for the programmable tuple metaclass
"""
import unittest
import itertools
from programmabletuple import ProgrammableTuple, ProgrammableExpr
#
# The programmable tuples class definition
# ========================================
#
# Some utility functions
# ----------------------
#
def _get_full_name(first_name, last_name):
"""Gets the full name"""
return ', '.join([last_name, first_name])
def _get_sui(self):
"""Gets the age in conventional Asian way
    Here newborns start at one year old.
"""
return self.age + 1
#
# The actual classes
# ------------------
#
class PersonPT(ProgrammableTuple, auto_defining=True):
"""A toy person class
It just has three simple fields, first name, last name, and age,
the full name is also given in a data field.
The defining fields are going to be assigned automatically.
"""
__data_fields__ = [
'full_name',
]
def __init__(self, first_name, last_name, age):
"""Initialize a person
The full name will be set as well.
"""
self.full_name = _get_full_name(first_name, last_name)
sui = property(_get_sui)
class PersonPE(ProgrammableExpr):
"""A toy person class as programmable expression
It is just like the above class. Just the defining fields are going to be
assigned manually and it is not a tuple subclass.
"""
__data_fields__ = [
'full_name',
]
def __init__(self, first_name, last_name, age):
"""Initialize a person
The full name will be set as well.
"""
self.first_name = first_name
self.last_name = last_name
self.age = age
self.full_name = _get_full_name(first_name, last_name)
sui = property(_get_sui)
#
# Subclass definition
# ===================
#
class JohnsonsPT(PersonPT):
"""Members of the Johnson family"""
def __init__(self, first_name, age):
self.super().__init__(first_name, 'Johnson', age)
def is_johnsons(self):
return True
class JohnsonsPE(PersonPE):
"""Members of the Johnson family"""
def __init__(self, first_name, age):
self.super().__init__(first_name, 'Johnson', age)
def is_johnsons(self):
return True
#
# The tests
# =========
#
class ImmutableClassTest(unittest.TestCase):
"""Test suite for the programmable tuple metaclass"""
def setUp(self):
self.jsmith_pt = PersonPT('John', 'Smith', 49)
self.jsmith_pe = PersonPE('John', 'Smith', 49)
self.jsmiths = [self.jsmith_pt, self.jsmith_pe]
self.ajohnson_pt = JohnsonsPT('Andy', 8)
self.ajohnson_pe = JohnsonsPE('Andy', 8)
#
# Tests of the essential behaviour of programmable tuples
#
def test_access(self):
"""Tests the access of the fields of the person"""
for jsmith in self.jsmiths:
self.assertEqual(jsmith.first_name, 'John')
self.assertEqual(jsmith.last_name, 'Smith')
self.assertEqual(jsmith.age, 49)
self.assertEqual(jsmith.full_name, 'Smith, John')
def test_method(self):
"""Tests if the method defined in the class can be called"""
for jsmith in self.jsmiths:
self.assertEqual(jsmith.sui, 50)
def test_immutability(self):
"""Tests if the attributes are really not mutable"""
def mutate_pt():
self.jsmith_pt.age = 15
def mutate_pe():
self.jsmith_pe.age = 15
self.assertRaises(AttributeError, mutate_pt)
self.assertRaises(AttributeError, mutate_pe)
def test_subclassing(self):
"""Tests if the subclassing is working properly"""
for andy in [self.ajohnson_pt, self.ajohnson_pe]:
self.assertEqual(andy.first_name, 'Andy')
self.assertEqual(andy.last_name, 'Johnson')
self.assertEqual(andy.age, 8)
self.assertEqual(andy.sui, 9)
self.assertEqual(andy.full_name, 'Johnson, Andy')
self.assertTrue(andy.is_johnsons())
def test_hashing(self):
"""Tests the correctness of hashing and equality testing"""
equal_ones = [] # Each entry is a list of equal ones. Different
# entries are not equal.
for i in self.jsmiths:
equal_ones.append([
i, type(i)('John', 'Smith', 49)
])
equal_ones.append([type(i)('John', 'Smith', 3)])
for i, v in enumerate(equal_ones):
            # Assert that the two members of each pair within the chunk are
            # equal and have the same hash.
for j, k in itertools.combinations(v, 2):
self.assertEqual(hash(j), hash(k))
self.assertEqual(j, k)
continue
            # Assert that each member of the chunk is unequal to, and has a
            # different hash from, every member of every other chunk.
for j in v:
for k in itertools.chain.from_iterable(equal_ones[i + 1:]):
self.assertNotEqual(hash(j), hash(k))
self.assertNotEqual(j, k)
continue
continue
# Continue to the next chunk.
continue
#
# Tests of the utilities in the mixin class
#
def test_update(self):
"""Tests updating a defining attribute"""
for jsmith in self.jsmiths:
doug = jsmith._update(first_name='Doug')
self.assertEqual(doug.first_name, 'Doug')
self.assertEqual(doug.last_name, 'Smith')
self.assertEqual(doug.full_name, 'Smith, Doug')
self.assertEqual(doug.age, 49)
def test_replace(self):
"""Tests forced replacement of an attribute"""
for jsmith in self.jsmiths:
doug_inconsistent = jsmith._replace(first_name='Doug')
self.assertEqual(doug_inconsistent.first_name, 'Doug')
self.assertEqual(doug_inconsistent.last_name, 'Smith')
self.assertEqual(doug_inconsistent.full_name, 'Smith, John')
def test_formating(self):
"""Tests the formatting as repr and str"""
# We need to test all combinations, repr and str, with PT and PE.
repr_args = "(first_name='John', last_name='Smith', age=49)"
str_args = "(first_name=John, last_name=Smith, age=49)"
for head, person in [
('PersonPT', self.jsmith_pt), ('PersonPE', self.jsmith_pe)
]:
self.assertEqual(repr(person), head + repr_args)
self.assertEqual(str(person), head + str_args)
continue
def test_asdict(self):
"""Tests the asdict methods
Here only the naive encoding and decoding are tested, not the
complicated recursive cases.
"""
for jsmith in self.jsmiths:
# Tests the conversion to dictionaries.
def_dict = jsmith._asdict()
full_dict = jsmith._asdict(full=True)
for i in [def_dict, full_dict]:
self.assertEqual(i['first_name'], 'John')
self.assertEqual(i['last_name'], 'Smith')
self.assertEqual(i['age'], 49)
self.assertEqual(len(def_dict), 3)
self.assertEqual(full_dict['full_name'], 'Smith, John')
self.assertEqual(len(full_dict), 4)
# Tests the loading from dictionaries.
resolved_jsmith = jsmith._load_from_dict(def_dict)
self.assertEqual(jsmith, resolved_jsmith)
resolved_jsmith = jsmith._load_from_dict(full_dict, full=True)
self.assertEqual(jsmith, resolved_jsmith)
| {
"content_hash": "502ec0fd05130c7734011ed28eddc528",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 77,
"avg_line_length": 27.778181818181817,
"alnum_prop": 0.5813588165990313,
"repo_name": "tschijnmo/programmabletuple",
"id": "a0c8451e1228869d47f1c022c4cde28f3e116631",
"size": "7639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "programmabletuple/tests/programmabletuple_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33845"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
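# Illustrative sketch (hypothetical config values, shown only to clarify the
# helper above): with a bitcoin.conf containing
#     rpcuser=alice
#     rpcpassword=secret
#     testnet=1
# read_bitcoin_config() returns {'rpcuser': 'alice', 'rpcpassword': 'secret',
# 'testnet': '1'}. All values come back as strings, which is why connect_JSON()
# below converts 'testnet' with int() before using it.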
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 56677 if testnet else 56577
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
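# Worked example (hypothetical numbers, for illustration only): with
# needed=Decimal("1.0") and inputs carrying amounts 0.4 and 0.7, the greedy
# loop above takes both outputs (have becomes 1.1) and returns them together
# with the overshoot of 0.1, which create_tx() below treats as change.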
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out  # the fee is whatever the inputs do not pay back out
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| {
"content_hash": "f4719a39c9d5ac05052110e23383df1c",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.3968253968254,
"alnum_prop": 0.6155436130632492,
"repo_name": "007au/AmeristralianDollar",
"id": "df9080a3267fb4168098e508ec98dcda9066a58f",
"size": "10054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "31254"
},
{
"name": "C++",
"bytes": "2498190"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Objective-C",
"bytes": "858"
},
{
"name": "Objective-C++",
"bytes": "5711"
},
{
"name": "Python",
"bytes": "69727"
},
{
"name": "Shell",
"bytes": "9737"
},
{
"name": "TypeScript",
"bytes": "5282166"
}
],
"symlink_target": ""
} |
from xml.dom import minidom
class XmlNode:
"""An XML node represents a single field in an XML document."""
def __init__(self, domElement):
"""Construct an XML node from a DOM element."""
self.elem = domElement
@classmethod
def makeRoot(cls, xmlFileName):
return cls(minidom.parse(xmlFileName))
def getData(self):
"""Extract data from a DOM node."""
for child in self.elem.childNodes:
if child.nodeType == child.TEXT_NODE:
return str(child.data)
return None
def getAttributeValue(self, name):
"""Returns the value of the attribute having the specified name."""
return str(self.elem.attributes[name].value)
def getChild(self, tag):
"""Returns the first child node having the specified tag."""
return XmlNode(self.elem.getElementsByTagName(tag)[0])
def getChildren(self, tag):
"""Returns a list of child nodes having the specified tag."""
return [XmlNode(x) for x in self.elem.getElementsByTagName(tag)]
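# Usage sketch (hypothetical file and tag names, shown only to illustrate the
# API defined above):
#
#     root = XmlNode.makeRoot("books.xml")
#     for book in root.getChildren("book"):
#         title = book.getChild("title").getData()
#         isbn = book.getAttributeValue("isbn")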
| {
"content_hash": "85b22a11f2d397e18d501b3bdef7410a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 34.58064516129032,
"alnum_prop": 0.6361940298507462,
"repo_name": "ActiveState/code",
"id": "02b2defa806d84b91854e8c9c211ad7122775e78",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577451_Easy_XML/recipe-577451.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from twisted.internet import defer # pylint: disable=import-error
from twisted.spread import pb # pylint: disable=import-error
class AsyncCommandBase:
MAX_BUFFER_SIZE = 1024 * 1024 # 1Mb
def __init__(self, options=None, on_end_callback=None):
self.options = options or {}
self.on_end_callback = on_end_callback
self._buffer = b""
self._return_code = None
self._d = None
self._paused = False
try:
self.start()
except Exception as exc:
raise pb.Error(str(exc)) from exc
@property
def id(self):
return id(self)
def pause(self):
self._paused = True
self.stop()
def unpause(self):
self._paused = False
self.start()
def start(self):
raise NotImplementedError
def stop(self):
self.transport.loseConnection() # pylint: disable=no-member
def _ac_ended(self):
if self.on_end_callback:
self.on_end_callback()
if not self._d or self._d.called:
self._d = None
return
if self._buffer:
self._d.callback(self._buffer)
else:
self._d.callback(None)
def _ac_ondata(self, data):
self._buffer += data
if len(self._buffer) > self.MAX_BUFFER_SIZE:
self._buffer = self._buffer[-1 * self.MAX_BUFFER_SIZE :]
if self._paused:
return
if self._d and not self._d.called:
self._d.callback(self._buffer)
self._buffer = b""
def ac_read(self):
if self._buffer:
result = self._buffer
self._buffer = b""
return result
if self._return_code is None:
self._d = defer.Deferred()
return self._d
return None
def ac_write(self, data):
self.transport.write(data) # pylint: disable=no-member
return len(data)
def ac_close(self):
self.stop()
return self._return_code
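# Subclassing sketch (hypothetical, illustrating the contract implied above
# rather than an existing class): a concrete async command implements start(),
# feeds incoming bytes to self._ac_ondata(), sets self._return_code when the
# underlying command finishes, and then calls self._ac_ended() so ac_close()
# can report the code. With Twisted, for example, a ProcessProtocol subclass
# would call self._ac_ondata(data) from outReceived() and self._ac_ended()
# from processEnded().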
| {
"content_hash": "717607c2cc2842e39d4aba6a50a89fcd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 68,
"avg_line_length": 26.2987012987013,
"alnum_prop": 0.5466666666666666,
"repo_name": "platformio/platformio-core",
"id": "8105062c7040b7313c793ff61ccb2395eca19ab6",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/remote/ac/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2005"
},
{
"name": "Makefile",
"bytes": "710"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "1345194"
},
{
"name": "Smarty",
"bytes": "52334"
}
],
"symlink_target": ""
} |
from time import sleep
from pytest import set_trace # noqa
from selenium.common import exceptions
from brome.core.selector import Selector
from brome_config import default_config, default_browser_config
from model.basetest import BaseTest
from model.selector import selector_dict
from model.test_dict import test_dict
from model.user import User
from brome.runner.local_runner import LocalRunner
def test_wait_until_visible(browser_name, brome):
class TestWaitUntilVisible(object):
class Test(BaseTest):
name = 'Wait until visible'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_visible_test")
element = self.pdriver.find(
"id:2",
wait_until_present=False,
wait_until_visible=False,
raise_exception=False
)
assert not element
element = self.pdriver.find(
"id:2",
wait_until_visible=True,
raise_exception=False
)
assert element
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestWaitUntilVisible]
)
LocalRunner(brome).execute()
def test_wait_until_present(browser_name, brome):
class TestWaitUntilPresent(object):
class Test(BaseTest):
name = 'Wait until present'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_present_test")
el = self.pdriver.wait_until_present("id:3")
assert el.get_attribute('id') == '3'
el = self.pdriver.wait_until_present(
"id:1",
raise_exception=False,
timeout=6
)
assert el.get_attribute('id') == '1'
el = self.pdriver.wait_until_present(
"id:2",
raise_exception=False
)
assert not el
el = self.pdriver.wait_until_present(
"id:2",
raise_exception=False,
timeout=11
)
assert el.get_attribute('id') == '2'
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestWaitUntilPresent]
)
LocalRunner(brome).execute()
def test_wait_until_not_visible(browser_name, brome):
class TestWaitUntilNotVisible(object):
class Test(BaseTest):
name = 'Wait until not visible'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_not_visible_test")
self.pdriver.wait_until_not_visible(
"id:2",
raise_exception=False
)
element = self.pdriver.find("id:2", raise_exception=False)
assert not element
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestWaitUntilNotVisible]
)
LocalRunner(brome).execute()
def test_wait_until_not_present(browser_name, brome):
class TestWaitUntilNotPresent(object):
class Test(BaseTest):
name = 'Wait until not present'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_not_present_test")
ret = self.pdriver.wait_until_not_present(
"id:3",
raise_exception=False
)
assert ret
ret = self.pdriver.wait_until_not_present(
"id:4",
raise_exception=False
)
                assert not ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestWaitUntilNotPresent]
)
LocalRunner(brome).execute()
def test_state(browser_name, brome):
class UnStateful(object):
pass
class TestState(object):
class Test(BaseTest):
name = 'State'
def create_state(self):
self.unstateful = UnStateful()
self.stateful = User(self.pdriver, 'test')
self.int_ = 1
self.float_ = 0.1
self.unicode_ = u'test'
self.str_ = 'str'
self.list_ = [1, 2]
self.dict_ = {'key': 1}
def run(self, **kwargs):
self.info_log("Running...")
                # TODO find a way to load the state before asserting
# assert not hasattr(self, 'unstateful')
assert hasattr(self, 'stateful')
assert hasattr(self, 'int_')
assert hasattr(self, 'float_')
assert hasattr(self, 'unicode_')
assert hasattr(self, 'str_')
assert hasattr(self, 'list_')
assert hasattr(self, 'dict_')
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestState]
)
LocalRunner(brome).execute()
def test_selector(browser_name, brome):
class TestSelector(object):
class Test(BaseTest):
name = 'Selector'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("selector_test")
# ##TAG NAME
# FIND ALL BY TAG NAME
elements = self.pdriver.find_all("tn:h1")
assert len(elements) == 2
# FIND BY TAG NAME
element = self.pdriver.find("tn:h1")
assert element.get_attribute('id') == '1'
# FIND LAST BY TAG NAME
element = self.pdriver.find_last("tn:h1")
assert element.get_attribute('id') == '2'
# FIND BY TAG NAME DOESNT EXIST
element = self.pdriver.find(
"tn:thisdoesntexist",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY TAG NAME DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"tn:thisdoesntexist",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##NAME
# FIND ALL BY NAME
elements = self.pdriver.find_all("nm:name-selector")
assert len(elements) == 2
# FIND BY NAME
element = self.pdriver.find("nm:name-selector")
assert element.get_attribute('id') == '3'
# FIND LAST BY NAME
element = self.pdriver.find_last("nm:name-selector")
assert element.get_attribute('id') == '4'
# FIND BY NAME DOESNT EXIST
element = self.pdriver.find(
"nm:thisdoesntexist",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY NAME DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"nm:thisdoesntexist",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##CLASS NAME
# FIND ALL BY CLASS NAME
elements = self.pdriver.find_all("cn:class-selector")
assert len(elements) == 2
# FIND BY CLASS NAME
element = self.pdriver.find("cn:class-selector")
assert element.get_attribute('id') == '2'
# FIND LAST BY CLASS NAME
element = self.pdriver.find_last("cn:class-selector")
assert element.get_attribute('id') == '5'
# FIND BY CLASS NAME DOESNT EXIST
element = self.pdriver.find(
"cn:thisdoesntexist",
raise_exception=False,
wait_until_visible=False
)
assert not element
# FIND BY CLASS NAME DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"cn:thisdoesntexist",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##ID
# FIND ALL BY ID
elements = self.pdriver.find_all("id:1")
assert len(elements) == 1
# FIND BY ID
element = self.pdriver.find("id:1")
assert element.get_attribute('id') == '1'
# FIND LAST BY ID
element = self.pdriver.find_last("id:1")
assert element.get_attribute('id') == '1'
# FIND BY ID DOESNT EXIST
element = self.pdriver.find(
"id:thisdoesntexist",
raise_exception=False,
wait_until_visible=False
)
assert not element
# FIND BY ID DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"id:thisdoesntexist",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##XPATH
# FIND ALL BY XPATH
elements = self.pdriver.find_all("xp://h1")
assert len(elements) == 2
# FIND BY XPATH
element = self.pdriver.find("xp://*[@id = 1]")
assert element.get_attribute('id') == '1'
# FIND LAST BY XPATH
element = self.pdriver.find_last("xp://h1")
assert element.get_attribute('id') == '2'
# FIND BY XPATH DOESNT EXIST
element = self.pdriver.find(
"xp://*[@class = 'thisdoesntexist']",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY XPATH DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"xp://*[@class = 'thisdoesntexist']",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##CSS
# FIND ALL BY CSS
elements = self.pdriver.find_all("cs:h1")
assert len(elements) == 2
# FIND BY CSS
element = self.pdriver.find("cs:h1")
assert element.get_attribute('id') == '1'
# FIND LAST BY CSS
element = self.pdriver.find_last("cs:h1")
assert element.get_attribute('id') == '2'
# FIND BY CSS DOESNT EXIST
element = self.pdriver.find(
"cs:.thisdoesntexist",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY CSS DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"cs:.thisdoesntexist",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##LINK TEXT
# FIND ALL BY LINK TEXT
elements = self.pdriver.find_all("lt:link-text-selector")
assert len(elements) == 2
# FIND BY LINK TEXT
element = self.pdriver.find("lt:link-text-selector")
assert element.get_attribute('id') == '6'
# FIND LAST BY LINK TEXT
element = self.pdriver.find_last("lt:link-text-selector")
assert element.get_attribute('id') == '7'
# FIND BY LINK TEXT DOESNT EXIST
element = self.pdriver.find(
"lt:text-selector",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY LINK TEXT DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"lt:thisdoesntexit",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##PARTIAL LINK TEXT
# FIND ALL BY PARTIAL LINK TEXT
elements = self.pdriver.find_all("pl:text-selector")
assert len(elements) == 2
# FIND BY PARTIAL LINK TEXT
element = self.pdriver.find("pl:text-selector")
assert element.get_attribute('id') == '6'
# FIND LAST BY PARTIAL LINK TEXT
element = self.pdriver.find_last("pl:text-selector")
assert element.get_attribute('id') == '7'
# FIND BY PARTIAL LINK TEXT DOESNT EXIST
element = self.pdriver.find(
"pl:thisdoesntexist",
raise_exception=False,
wait_until_visible=False,
wait_until_present=False
)
assert not element
# FIND BY PARTIAL LINK TEXT DOESNT EXIST RAISE EXCEPTION
try:
self.pdriver.find(
"pl:thisdoesntexit",
wait_until_visible=False,
wait_until_present=False
)
assert False
except exceptions.NoSuchElementException:
assert True
# ##SELECTOR VARIABLE
# Selector variable that doesnt exist in the selector dict
selector = "sv:not_exist"
try:
Selector(self.pdriver, selector)
assert False
except:
assert True
# Selector variable that are invalid
# selector_dict['test_2'] = "zz://*[@id = '1']"
selector = "sv:test_2"
try:
Selector(self.pdriver, selector)
assert False
except:
assert True
# Single selector
selector = "sv:test_1"
_selector = Selector(self.pdriver, selector)
assert _selector.get_selector() == selector_dict["test_1"][3:]
# Double selector
selector_1 = "sv:test_3"
selector_2 = "sv:test_4"
_selector = Selector(self.pdriver, [selector_1, selector_2])
assert _selector.get_selector() == selector_dict["test_3"][3:] + selector_dict["test_4"][3:] # noqa
# Multiple selector
selector_1 = "sv:test_3"
selector_2 = "sv:test_4"
selector_3 = "sv:test_5"
_selector = Selector(self.pdriver, [selector_1, selector_2, selector_3]) # noqa
assert _selector.get_selector() == selector_dict["test_3"][3:] + selector_dict["test_4"][3:] + selector_dict["test_5"][3:] # noqa
# Multiple selector with mismatch
selector_1 = "sv:test_3"
selector_2 = "sv:test_7"
try:
Selector(self.pdriver, [selector_1, selector_2])
assert False
except:
assert True
# Selector browser specific
self.app.pdriver._driver.capabilities['browserName'] = 'firefox' # noqa
_selector = Selector(self.pdriver, 'sv:example_multiple_selector') # noqa
assert _selector.get_selector() == selector_dict['example_multiple_selector']['default'][3:] # noqa
self.app.pdriver._driver.capabilities['browserName'] = 'chrome'
_selector = Selector(self.pdriver, 'sv:example_multiple_selector') # noqa
assert _selector.get_selector() == selector_dict['example_multiple_selector']['chrome|iphone|android'][3:] # noqa
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestSelector]
)
LocalRunner(brome).execute()
def test_select_all(browser_name, brome):
class TestSelectAll(object):
class Test(BaseTest):
name = 'Select all'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("select_all_test")
self.pdriver.find("id:selectme").select_all()
# TODO find a way to know that the text has been selected
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestSelectAll]
)
LocalRunner(brome).execute()
def test_assert_wait_until_visible(browser_name, brome):
class TestAssertWaitUntilVisible(object):
class Test(BaseTest):
name = 'Is visible'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_visible_test")
ret = self.pdriver.is_visible("id:3")
assert ret
ret = self.pdriver.is_visible("id:1")
assert not ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertWaitUntilVisible]
)
LocalRunner(brome).execute()
def test_assert_wait_until_present(browser_name, brome):
class TestAssertWaitUntilPresent(object):
class Test(BaseTest):
name = 'Is present'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_present_test")
ret = self.pdriver.is_present("id:2")
assert not ret
ret = self.pdriver.is_present("id:3")
assert ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertWaitUntilPresent]
)
LocalRunner(brome).execute()
def test_assert_intercept_javascript_error_disabled(browser_name, brome):
class TestAssertInterceptJavascriptErrorDisabled(object):
class Test(BaseTest):
name = 'Intercept javascript error disabled'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to('intercept_javascript_error_test')
js_error = self.pdriver.get_javascript_error(
return_type='list'
)
assert js_error == []
js_error = self.pdriver.get_javascript_error(
return_type='string'
)
assert js_error == []
self.pdriver.find("id:error-btn").click()
sleep(2)
js_error = self.pdriver.get_javascript_error(
return_type='list'
)
assert js_error == []
self.pdriver.find("id:error-btn").click()
sleep(2)
js_error = self.pdriver.get_javascript_error(
return_type='string'
)
assert js_error == []
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome_config['proxy_driver']['intercept_javascript_error'] = False
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertInterceptJavascriptErrorDisabled]
)
LocalRunner(brome).execute()
def test_assert_intercept_javascript_error(browser_name, brome):
class TestAssertInterceptJavascriptError(object):
class Test(BaseTest):
name = 'Intercept javascript error'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to('intercept_javascript_error_test')
js_error = self.pdriver.get_javascript_error(
return_type='list'
)
assert js_error == []
js_error = self.pdriver.get_javascript_error(
return_type='string'
)
assert js_error == self.pdriver.no_javascript_error_string
self.pdriver.find("id:error-btn").click()
sleep(2)
js_error = self.pdriver.get_javascript_error(
return_type='list'
)
assert len(js_error) > 0
self.pdriver.find("id:error-btn").click()
sleep(2)
js_error = self.pdriver.get_javascript_error(
return_type='string'
)
assert js_error != self.pdriver.no_javascript_error_string
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome_config['proxy_driver']['intercept_javascript_error'] = True
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertInterceptJavascriptError]
)
LocalRunner(brome).execute()
def test_assert_inject_script(browser_name, brome):
class TestAssertInjectScript(object):
class Test(BaseTest):
name = 'Inject script'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("select_all_test")
"""
try:
ret = self.pdriver.execute_script(
"return module_test.test;"
)
assert False
except WebDriverException:
assert True
"""
self.pdriver.inject_js_script("/static/test_script.js")
sleep(2)
ret = self.pdriver.execute_script("return module_test.test;")
assert ret == 'test'
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertInjectScript]
)
LocalRunner(brome).execute()
def test_assert_dnd(browser_name, brome):
class TestAssertDnd(object):
class Test(BaseTest):
name = 'Drag and Drop'
def run(self, **kwargs):
self.info_log("Running...")
self.app.pdriver.get("http://touchpunch.furf.com/content.php?/droppable/default-functionality") # noqa
self.app.pdriver.drag_and_drop(
"id:draggable",
"id:droppable"
)
ret = self.app.pdriver.assert_visible(
"xp://p[contains(text(), 'Dropped!')]"
)
assert ret
# TODO investigate and fix
"""
self.app.go_to("dnd_test")
self.app.pdriver.drag_and_drop(
"id:column-a",
"id:column-b"
)
els = self.app.pdriver.find_all("cn:column")
assert els[0].get_attribute('id') == "column-b"
"""
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertDnd]
)
LocalRunner(brome).execute()
def test_assert_click(browser_name, brome):
class TestAssertClick(object):
class Test(BaseTest):
name = 'Click'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("click_test")
element = self.pdriver.find("id:1")
element.click()
result = self.pdriver.find(
"xp://*[contains(text(), 'clicked')]"
)
assert result
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertClick]
)
LocalRunner(brome).execute()
def test_assert_visible(browser_name, brome):
class TestAssertVisible(object):
class Test(BaseTest):
name = 'Assert visible'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("wait_until_visible_test")
ret = self.pdriver.assert_visible(
"id:2",
wait_until_visible=False
)
assert not ret
ret = self.pdriver.assert_visible(
"id:2",
wait_until_visible=True
)
assert ret
ret = self.pdriver.assert_visible("id:3")
assert ret
ret = self.pdriver.assert_visible("id:1")
assert not ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertVisible]
)
LocalRunner(brome).execute()
def test_assert_equal_not_equal(browser_name, brome):
class TestAssertEqualNotEqual(object):
class Test(BaseTest):
name = 'Assert equal and not equal'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("assert_text_test")
ret = self.app.pdriver.assert_text_equal(
"id:1",
"this is visible"
)
assert ret
ret = self.app.pdriver.assert_text_not_equal(
"id:1",
"whatever"
)
assert ret
ret = self.app.pdriver.assert_text_not_equal(
"id:1",
"this is visible"
)
assert not ret
ret = self.app.pdriver.assert_text_equal("id:1", "whatever")
assert not ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertEqualNotEqual]
)
LocalRunner(brome).execute()
def test_assert_present(browser_name, brome):
class TestAssertPresent(object):
class Test(BaseTest):
name = 'Assert present'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("assert_present_test")
ret = self.pdriver.assert_present(
"id:2",
wait_until_present=False
)
assert not ret
ret = self.pdriver.assert_present("id:3")
assert ret
ret = self.pdriver.assert_present(
"id:2",
wait_until_present=True
)
assert ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertPresent]
)
LocalRunner(brome).execute()
def test_assert_not_visible(browser_name, brome):
class TestAssertNotVisible(object):
class Test(BaseTest):
name = 'Assert not visible'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("assert_not_visible_test")
ret = self.pdriver.assert_not_visible("id:2")
assert ret
self.app.go_to("assert_not_visible_test")
ret = self.pdriver.assert_not_visible(
"id:3",
wait_until_not_visible=False
)
assert not ret
self.app.go_to("assert_not_visible_test")
ret = self.pdriver.assert_not_visible(
"id:3",
wait_until_not_visible=True
)
assert ret
ret = self.pdriver.assert_not_visible("id:2")
assert not ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertNotVisible]
)
LocalRunner(brome).execute()
def test_assert_not_present(browser_name, brome):
class TestAssertNotPresent(object):
class Test(BaseTest):
name = 'Assert not present'
def run(self, **kwargs):
self.info_log("Running...")
self.app.go_to("assert_not_present_test")
ret = self.pdriver.assert_not_present(
"id:3",
wait_until_not_present=False
)
assert not ret
self.app.go_to("assert_not_present_test")
ret = self.pdriver.assert_not_present(
"id:2",
wait_until_not_present=False
)
assert ret
self.app.go_to("assert_not_present_test")
ret = self.pdriver.assert_not_present(
"id:3",
wait_until_not_present=True
)
assert ret
brome_config = default_config.copy()
brome_config['runner_args']['localhost_runner'] = browser_name
brome.configure(
config=brome_config,
selector_dict=selector_dict,
test_dict=test_dict,
browsers_config=default_browser_config,
tests=[TestAssertNotPresent]
)
LocalRunner(brome).execute()
| {
"content_hash": "c3b94b45fce7a95d041d8ce56b7bb808",
"timestamp": "",
"source": "github",
"line_count": 1107,
"max_line_length": 146,
"avg_line_length": 31.092140921409214,
"alnum_prop": 0.5035300270199599,
"repo_name": "jf-parent/brome",
"id": "ce0a49196f9f54ac634161f27d090349f844e06c",
"size": "34419",
"binary": false,
"copies": "2",
"ref": "refs/heads/release",
"path": "tests/brome/test_proxy_driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "950"
},
{
"name": "HTML",
"bytes": "12122"
},
{
"name": "JavaScript",
"bytes": "267133"
},
{
"name": "Makefile",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "481322"
},
{
"name": "Smarty",
"bytes": "731"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.core.urls')),
url(r'^', include('apps.ctf.urls')),
url(r'^api/', include('apps.core.api.urls')),
url(r'^api/', include('apps.ctf.api.urls')),
url(r'^token/', 'apps.core.api.auth.obtain_auth_token'),
)
| {
"content_hash": "dda091777bafd157fa75d43ff3df7d5e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 25.58823529411765,
"alnum_prop": 0.6344827586206897,
"repo_name": "blstream/CaptureTheFlag",
"id": "b82a9fba3894c6b255c6ecbddeb0a88de8dcc564",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctf-web-app/conf/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "232"
},
{
"name": "Python",
"bytes": "82182"
}
],
"symlink_target": ""
} |
"""Tests for proto_task_queue.worker."""
from unittest import mock
import uuid
import warnings
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
from proto_task_queue import task_pb2
from proto_task_queue import test_task_pb2
from proto_task_queue import worker
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
def _make_task_bytes(args):
"""Returns a serialized Task proto.
Args:
args: Proto message to put in the Task's args field.
"""
task = task_pb2.Task()
task.args.Pack(args)
return task.SerializeToString()
def _make_mock_pubsub_message(task_args):
"""Returns a mock pubsub message.
Args:
task_args: Proto message to use as args to the task to put in the pubsub
message.
"""
msg = mock.create_autospec(message.Message)
msg.data = _make_task_bytes(task_args)
msg.message_id = str(uuid.uuid4())
return msg
class WorkerTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._client = mock.create_autospec(client.Client)
self._task_to_string = mock.Mock(side_effect=text_format.MessageToString)
self._worker = worker.Worker(
pubsub_subscriber_client=self._client,
task_to_string=self._task_to_string,
)
def test_routing_and_task_success(self):
# Register two types of tasks.
foo_task_processor = mock.Mock()
bar_task_processor = mock.Mock()
self._worker.register(test_task_pb2.FooTaskArgs, foo_task_processor)
self._worker.register(test_task_pb2.BarTaskArgs, bar_task_processor)
# Create pubsub messages.
foo_task_args = test_task_pb2.FooTaskArgs(widget='Water is made of water.')
foo_message = _make_mock_pubsub_message(foo_task_args)
bar_task_args = test_task_pb2.BarTaskArgs(best_number=42)
bar_message = _make_mock_pubsub_message(bar_task_args)
# Subscribe, and process the messages.
subscribe_retval = 'grapes of testing'
self._mock_subscribe([foo_message, bar_message], retval=subscribe_retval)
self.assertIs(subscribe_retval, self._worker.subscribe('kumquat'))
self._client.subscribe.assert_called_once_with('kumquat', mock.ANY)
# Test that the messages were routed to the correct tasks, and that both
# messages were ACKed.
foo_task_processor.assert_called_once_with(foo_task_args)
foo_message.ack.assert_called_once_with()
bar_task_processor.assert_called_once_with(bar_task_args)
bar_message.ack.assert_called_once_with()
def test_register_after_subscribe_error(self):
self._worker.subscribe('kumquat')
with self.assertRaisesRegex(RuntimeError,
'register.*after a subscriber is started'):
self._worker.register(test_task_pb2.FooTaskArgs, mock.Mock())
@parameterized.named_parameters(
('invalid_task_proto', b'this is probably not a valid binary proto'),
('unregistered_type', _make_task_bytes(test_task_pb2.FooTaskArgs())))
def test_unhandled_message(self, message_data):
self._worker.register(test_task_pb2.BarTaskArgs, mock.Mock())
msg = mock.create_autospec(message.Message)
msg.data = message_data
msg.message_id = str(uuid.uuid4())
self._mock_subscribe([msg])
with warnings.catch_warnings():
# Ignore a warning from trying to parse an invalid binary proto.
warnings.filterwarnings('ignore', 'Unexpected end-group tag:')
self._worker.subscribe('kumquat')
msg.nack.assert_called_once_with()
def test_task_to_string_error(self):
self._task_to_string.side_effect = RuntimeError(
'Unable to convert proto to string.')
self._worker.register(test_task_pb2.FooTaskArgs, mock.Mock())
foo_message = _make_mock_pubsub_message(test_task_pb2.FooTaskArgs())
self._mock_subscribe([foo_message])
self._worker.subscribe('kumquat')
foo_message.nack.assert_called_once_with()
def test_callback_error(self):
foo_task_processor = mock.Mock(side_effect=RuntimeError('foo error'))
self._worker.register(test_task_pb2.FooTaskArgs, foo_task_processor)
foo_message = _make_mock_pubsub_message(test_task_pb2.FooTaskArgs())
self._mock_subscribe([foo_message])
self._worker.subscribe('kumquat')
foo_message.nack.assert_called_once_with()
def _mock_subscribe(self, messages, retval=None):
"""Set up self._client.subscribe() to mock messages being published.
Note: This processes messages synchronously in the calling thread, unlike a
real pubsub subscriber.
Args:
messages: Iterable of pubsub messages to treat as if they were received
from pubsub.
retval: Value to return from subscribe(). Normally that function would
return a Future, but that isn't needed for this synchronous
implementation.
"""
def side_effect(subscription, callback):
del subscription # Unused.
for msg in messages:
callback(msg)
return retval
self._client.subscribe.side_effect = side_effect
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "5098247e431f10374eb89140d6d11b38",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 36.07801418439716,
"alnum_prop": 0.7063102024769019,
"repo_name": "google/proto-task-queue",
"id": "738cda457fe066f9297780a4b5ae9d1668d824f0",
"size": "5662",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proto_task_queue/worker_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "798"
},
{
"name": "Python",
"bytes": "24031"
}
],
"symlink_target": ""
} |
BOT_NAME = 'artbot'
SPIDER_MODULES = ['artbot.spiders']
NEWSPIDER_MODULE = 'artbot.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'artbot (+http://www.yourdomain.com)'
USER_AGENT_LIST = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY=8
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'artbot.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'artbot.middlewares.MyCustomDownloaderMiddleware': 543,
#}
HTTP_PROXY = 'http://127.0.0.1:8123'
DOWNLOADER_MIDDLEWARES = {
'artbot.middlewares.RandomUserAgentMiddleware': 400,
# 'artbot.middlewares.ProxyMiddleware': 410,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
# Disable compression middleware, so the actual HTML pages are cached
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'artbot.pipelines.SomePipeline': 300,
#}
ITEM_PIPELINES = {
'artbot.pipelines.ArtbotPipeline': 1
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "c650fe333b2dc9a29c0070780cc34c20",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 123,
"avg_line_length": 47.25714285714286,
"alnum_prop": 0.7392180572349859,
"repo_name": "zirui/poetry",
"id": "b318d90acae8381ef4c94aae1749efa03d99645c",
"size": "5393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artbot/artbot/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9223"
}
],
"symlink_target": ""
} |
import mock
from oslo.serialization import jsonutils
from nailgun.db.sqlalchemy.models import Release
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def test_release_put_change_name_and_version(self):
release = self.env.create_release(api=False)
resp = self.app.put(
reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
params=jsonutils.dumps({
'name': 'modified release',
'version': '5.1'
}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(200, resp.status_code)
release_from_db = self.db.query(Release).one()
self.db.refresh(release_from_db)
self.assertEqual('5.1', release_from_db.version)
self.assertEqual('5.1', resp.json_body['version'])
self.assertEqual('modified release', resp.json_body['name'])
def test_release_put_returns_400_if_no_body(self):
release = self.env.create_release(api=False)
resp = self.app.put(
reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
"",
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_release_delete_returns_400_if_clusters(self):
cluster = self.env.create_cluster(api=False)
resp = self.app.delete(
reverse('ReleaseHandler',
kwargs={'obj_id': cluster.release.id}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.json_body["message"],
"Can't delete release with "
"clusters assigned"
)
@mock.patch.dict(settings.VERSION, {'feature_groups': ['mirantis']})
def test_release_put_deployable(self):
release = self.env.create_release(api=False)
for deployable in (False, True):
resp = self.app.put(
reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
params=jsonutils.dumps({
'is_deployable': deployable,
}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.json_body['is_deployable'], deployable)
@mock.patch.dict(settings.VERSION, {'feature_groups': ['experimental']})
def test_release_deployable_in_experimental(self):
# set deployable to False
release = self.env.create_release(api=False)
resp = self.app.put(
reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
params=jsonutils.dumps({
'is_deployable': False,
}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
# check that release is deployable
resp = self.app.get(
reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
headers=self.default_headers,
)
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.json_body['is_deployable'], True)
| {
"content_hash": "9175025358abab9772aab5379eeaea87",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 76,
"avg_line_length": 38.72941176470588,
"alnum_prop": 0.6008505467800729,
"repo_name": "zhaochao/fuel-web",
"id": "3319ef899c5a3874ac4eaf0bfe77bdb1665c3a28",
"size": "3927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/test/unit/test_release_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109800"
},
{
"name": "HTML",
"bytes": "16017"
},
{
"name": "JavaScript",
"bytes": "705662"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3493678"
},
{
"name": "Ruby",
"bytes": "33590"
},
{
"name": "Shell",
"bytes": "26585"
}
],
"symlink_target": ""
} |
"""Layers used for experiments with sparsity."""
import functools
import math
import random as pyrandom
import numpy as np
from trax import fastmath
from trax import layers as tl
from trax.fastmath import numpy as jnp
from trax.fastmath import random
from trax.layers import base
from trax.layers import core
from trax.layers import initializers as init
from trax.layers import reversible
from trax.layers.assert_shape import assert_shape
# We use mixed CamelCase and snake_case names in this file.
# pylint: disable=invalid-name
@assert_shape('...->...')
class ReversibleReshapePermute(reversible.ReversibleLayer):
"""Simple and fast, reversible, random-looking permutation layer.
  This layer permutes the last dimension (usually the embedding dimension)
  with simple reshapes. It uses the same permutation for every embedding, and
  the permutation never changes.
The layer works only when the last dimension is a power of 2. The
permutation is not truly random, as it just uses reshapes to get a fast
random-looking permutation. It has, however, a permutation cycle length
of just log2(dimension_size).
"""
def forward(self, x):
shape = x.shape
x = x.reshape(shape[:-1]+(-1, self._get_multiplier(x)))
t_x = jnp.einsum('...ab->...ba', x) # transpose
return t_x.reshape(shape)
def reverse(self, x, weights=(), state=(), new_state=(), rng=None):
del state, new_state, rng
shape = x.shape
x = x.reshape(shape[:-1]+(self._get_multiplier(x), -1))
t_x = jnp.einsum('...ab->...ba', x) # transpose
return t_x.reshape(shape)
def _get_multiplier(self, x):
"""Return a size of the new dimension for reshaping.
We want to split the last dimension into two using approximately equal
dimensions, we could split a dimension of size 512 into 16 * 32.
However, not all numbers will work equally well, because we have a different
cycle length for permutations for different numbers. For example, for
dimension size 1024 and multiplier 32 we would get the same permutation
already after applying permutation twice (cycle length is 2), but with
multiplier 8 we would get the same permutation after appling permutation 10
times (cycle length is 10).
For powers of two the cycle length is limited by log2(dimension_size).
This function returns the biggest multiplier smaller than
sqrt(dimension_size) that keeps the longest possible cycle lenght of the
permutation.
Args:
x: The input tensor.
Returns:
An appropriate multiplier for the permutation reshape.
"""
last_dim = x.shape[-1]
def big_relatively_prime(n):
# The longest possible cycle is achieved iff log2(multiplier) and
# log2(dimension_size) are relatively prime. We choose the biggest such
# number smaller than sqrt(dimension_size).
for i in range(n//2, 0, -1):
if n%i != 0:
return i
return 1
max_cycle_len = int(math.log(last_dim, 2))
assert 2 ** max_cycle_len == last_dim
return 2 ** big_relatively_prime(max_cycle_len)
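# Worked example (illustration only, following the code above): for
# last_dim=512, max_cycle_len is 9 and big_relatively_prime(9) returns 4, so
# the multiplier is 2**4 = 16 and the reshape splits 512 into 16 * 32. For
# last_dim=1024, max_cycle_len is 10; since 5 divides 10, big_relatively_prime
# skips 5 and also returns 4, again giving a multiplier of 16.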
@assert_shape('...->...')
class ReversibleRandomPermute(reversible.ReversibleLayer):
"""Reversible, random permutation layer.
This layer permutates the last dimension (usually the embedding dimension)
by indexing and slicing. It uses the same random permutation for every
embedding, and this permutation never changes.
"""
def forward(self, x):
permutation, _ = self._get_permutation_and_reverse_permutation(x)
return x[..., permutation]
def reverse(self, x, weights=(), state=(), new_state=(), rng=None):
_, rev_permutation = self._get_permutation_and_reverse_permutation(x)
return x[..., rev_permutation]
def _get_permutation_and_reverse_permutation(self, x):
# TODO(jaszczur): random seed should be stored in state.
# Currently there is no way of doing it reliably.
last_dim = x.shape[-1]
permutation = list(range(last_dim))
rand = pyrandom.Random(42)
rand.shuffle(permutation)
rev_permutation = [permutation.index(i) for i in range(last_dim)]
return permutation, rev_permutation
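# Small illustration (hypothetical 3-element embedding, for clarity only): if
# the shuffled permutation were [2, 0, 1], the derived reverse permutation
# would be [1, 2, 0], since permutation.index(0) == 1, index(1) == 2 and
# index(2) == 0; applying x[..., permutation][..., rev_permutation] recovers x.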
@assert_shape('...a->...bc')
def SplitLastAxis(num_splits):
return tl.Fn(f'SplitLastAxis_{num_splits}',
lambda x: jnp.reshape(x, tuple(x.shape)[:-1] + (num_splits, -1)))
@assert_shape('...ab->...c')
def MergeLastTwoAxes():
return tl.Fn('MergeLastTwoAxes',
lambda x: jnp.reshape(x, tuple(x.shape)[:-2] + (-1,)))
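# Shape sketch (illustrative shapes, assuming the last axis is divisible by the
# split count): SplitLastAxis(4) maps a (2, 6, 8) tensor to (2, 6, 4, 2), and
# MergeLastTwoAxes() maps it back to (2, 6, 8), matching the '...a->...bc' and
# '...ab->...c' shape annotations above.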
@assert_shape('...a->...b')
def LocallyConnectedDense(n_modules, n_units, kernel_size=1,
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
use_bias=True):
"""Layer using LocallyConnected1d for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
LocallyConnected1d (grouped convolution) on all those modules, and
concatenates their results. It is essentially a locally-sensitive
  approximation of a Dense layer, with the number of parameters smaller by a
  factor of `n_modules / kernel_size`.
Args:
    n_modules: Indicates how many modules (pixels) the input and output should
      be split into for processing.
    n_units: How many outputs (filters) each module should generate.
kernel_size: The size of the kernel to be used.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
use_bias: If `True`, compute an affine map `y = Wx + b`; else compute
a linear map `y = Wx`.
Returns:
LocallyConnectedDense base.Layer.
"""
if n_modules == 1:
return tl.Dense(n_units, kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer, use_bias=use_bias)
return tl.Serial(
tl.SplitLastAxis(n_modules),
tl.LocallyConnected1d(
n_units, kernel_size, kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer, use_bias=use_bias, padding='WRAP'),
tl.MergeLastTwoAxes())
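# Shape sketch for LocallyConnectedDense (illustrative numbers): with
# n_modules=4 and n_units=64, an input of shape (..., 256) is reshaped to
# (..., 4, 64), each of the 4 modules is mapped to 64 outputs by
# LocallyConnected1d, and the result is merged back to (..., 256).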
@assert_shape('bld->bld')
def ModularCausalAttention(d_feature, n_heads=1, sparsity=None, dropout=0.0,
max_inference_length=2048,
kernel_size=1, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConnectedDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConnectedDense.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
kernel_size: Kernel size used in LocallyConnectedDense.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
n_modules = n_heads if sparsity is None else sparsity
@assert_shape('...a->...b')
def ProcessingLayer():
assert d_feature % n_modules == 0
return LocallyConnectedDense(n_modules, d_feature // n_modules,
kernel_size=kernel_size)
return tl.ConfigurableAttention(
ProcessingLayer(), ProcessingLayer(), ProcessingLayer(),
ProcessingLayer(), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode))
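# Example usage (illustrative): ModularCausalAttention(d_feature=512, n_heads=8,
# sparsity=16) builds Q/K/V and the output projection from
# LocallyConnectedDense(16, 32) blocks instead of full Dense(512) layers.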
class _RememberPad(base.Layer):
"""Layer which remembers last N elements in predict mode."""
def __init__(self, n_items_to_remember, mode):
"""Returns a layer which remembers last N elements in predict mode.
For predict mode, the layer remembers last N elements and pads with them.
For other modes, it pads with zeros. The layer pads/remembers elements from
the second axis.
Args:
n_items_to_remember: Number of items to remember/pad with.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
super().__init__(name='_RememberPad')
self._n_items_to_remember = n_items_to_remember
self._mode = mode
self._portal_mask = self.monkey_patched_mask() # pylint: disable=assignment-from-none
def monkey_patched_mask(self):
# This is necessary for Terraformer model. See comments there.
# The mask will only be used in Terraformer in predict mode.
return None
def forward(self, x):
if self._n_items_to_remember == 0:
return x
if self._mode == 'predict':
x = jnp.concatenate([self.state[0], x], axis=1)
if self._portal_mask is not None and 'init' in self.state[1]:
# TODO(jaszczur): In predict mode with monkey-patched mask, we
# currently assume that batch size is 1.
assert x.shape[0] == 1
mask = self._portal_mask.get_value()
count_padding = jnp.sum(mask == 0, dtype=jnp.int32)
self.state = (fastmath.dynamic_slice_in_dim(
x, x.shape[1] - (self._n_items_to_remember + count_padding),
self._n_items_to_remember, axis=1), {'forward': ()})
else:
self.state = (x[:, -self._n_items_to_remember:, ...], {'forward': ()})
else:
pad_widths = [[0, 0] for _ in range(len(x.shape))]
pad_widths[1][0] = self._n_items_to_remember
x = jnp.pad(x, pad_width=pad_widths, mode='constant')
return x
def init_weights_and_state(self, input_signature):
"""Initializes this layer's weights."""
if isinstance(input_signature, (list, tuple)):
input_signature = input_signature[0]
self.weights = ()
if self._mode == 'predict':
shape = list(input_signature.shape)
shape[1] = self._n_items_to_remember
self.state = (jnp.zeros(shape, dtype=jnp.float32), {'init': ()})
else:
self.state = ()
@assert_shape('...a->...b')
def LocallyConvDense(n_modules, n_units, mode, kernel_size=1,
length_kernel_size=1):
"""Layer using local convolutions for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
a convolution on all those modules, and concatenates their results.
It is similar to LocallyConnectedDense above, but shares weights.
Args:
    n_modules: Indicates how many modules (pixels) the input and output should
      be split into for processing.
    n_units: How many outputs (filters) each module should generate.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_size: The size of the kernel to be used.
length_kernel_size: If > 1, also do causal convolution on the previous axis,
which is often the sentence length in sequence models.
Returns:
LocallyConvDense base.Layer.
"""
if n_modules == 1:
return tl.Dense(n_units)
if kernel_size % 2 != 1:
raise ValueError('Currently we only handle odd kernel sizes.')
half = (kernel_size - 1) // 2
pad_widths = [[0, 0], [0, 0], [half, half], [0, 0]]
return tl.Serial(
tl.SplitLastAxis(n_modules),
tl.Fn('Pad', lambda x: jnp.pad(x, pad_width=pad_widths, mode='constant')),
_RememberPad(length_kernel_size-1, mode=mode),
tl.Conv(n_units, kernel_size=(length_kernel_size, kernel_size)),
tl.MergeLastTwoAxes()
)
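# Shape sketch for LocallyConvDense (illustrative): an input (batch, length, d)
# is split to (batch, length, n_modules, d / n_modules), padded by
# (kernel_size - 1) / 2 on both sides of the module axis and causally by
# length_kernel_size - 1 on the length axis (via _RememberPad), convolved to
# (batch, length, n_modules, n_units), and merged to
# (batch, length, n_modules * n_units).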
@assert_shape('bld->bld')
def ConvCausalAttention(d_feature, n_heads=1, sparsity=None, dropout=0.0,
max_inference_length=2048,
kernel_size=1, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConvDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConvDense.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
    kernel_size: Kernel size used in LocallyConvDense.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
n_modules = n_heads if sparsity is None else sparsity
@assert_shape('...a->...b')
def ProcessingLayer():
assert d_feature % n_modules == 0
return LocallyConvDense(n_modules, d_feature // n_modules, mode=mode,
kernel_size=kernel_size)
return tl.ConfigurableAttention(
ProcessingLayer(), ProcessingLayer(), ProcessingLayer(),
ProcessingLayer(), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode))
@assert_shape('...a->...b')
def LowRankDense(n_units, d_lowrank):
return tl.Serial(
tl.Dense(d_lowrank),
tl.Dense(n_units)
)
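# Parameter-count sketch (illustrative): a Dense mapping d -> n_units needs
# d * n_units kernel weights, while LowRankDense needs
# d * d_lowrank + d_lowrank * n_units. For d = n_units = 1024 and d_lowrank = 64
# that is roughly 1.05M vs. 0.13M weights (biases aside).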
@assert_shape('...a->...b')
def EinsumDense(d_input, d_output, use_bias):
"""Returns a reimplementation of Dense layer, using einsum.
  While this is equivalent to a Dense layer, it seems to be faster in decoding
  when used with bias (see decoding_timing_test.py).
This layer can be removed when we understand better the reason for the
difference in decoding speed.
Args:
d_input: Dimensionality of the input tensor.
d_output: Dimensionality of the output tensor.
use_bias: Whether to use bias.
"""
layers = [
tl.Weights(init.GlorotUniformInitializer(), [d_output, d_input]),
tl.Fn('EinsumDense',
(lambda kernel, embeds: # pylint: disable=g-long-lambda
jnp.einsum('xd,...d->...x', kernel, embeds)))
]
if use_bias:
layers.extend([
tl.Weights(init.RandomNormalInitializer(1e-6), [d_output]),
tl.Add()
])
return tl.Serial(layers)
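# A note on the layer above (illustrative): it computes
# y = einsum('xd,...d->...x', W, x), optionally adding a learned bias b, i.e.
# the same affine map as tl.Dense(d_output), with the kernel and bias exposed
# as explicit tl.Weights layers.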
def RandomLayer(layer_a, layer_b, prob_a):
"""Runs `layer_a` with probability `prob_a`, otherwise runs `layer_b`."""
condition = tl.Serial(
tl.RandomUniform(),
tl.Fn('SmallerThan', lambda x: x < prob_a)
)
return tl.Cond(condition, layer_a, layer_b)
@assert_shape('...a->...b')
def SparseDenseWithOptions(n_units, d_input=None, sparsity_type=None,
sparsity=0, d_lowrank=None, prob_sparse=None,
mode=None, use_bias=True, use_bfloat16=False):
"""Configurable sparse version of Dense layer."""
if prob_sparse is not None:
if mode is not None and mode != 'train':
# For non-training modes, we want to use a sparse variant.
# This is different than simply prob_sparse being None, as the weights of
# the model are different.
prob_sparse = 1.0
return RandomLayer(
SparseDenseWithOptions(n_units, d_input, sparsity_type, sparsity,
d_lowrank, use_bias=use_bias,
use_bfloat16=use_bfloat16),
tl.Dense(n_units, use_bias=use_bias, use_bfloat16=use_bfloat16),
prob_sparse)
if sparsity_type is None or sparsity_type == 'None' or sparsity == 0:
return tl.Dense(n_units, use_bias=use_bias, use_bfloat16=use_bfloat16)
if sparsity_type == 'mult':
return FactoredDense(sparsity, d_input, n_units, use_bias=use_bias,
use_bfloat16=use_bfloat16)
assert not use_bfloat16 # use_bfloat16 is unsupported for other variants
if sparsity_type == 'lowrank':
assert use_bias # use_bias=False is unsupported
return LowRankDense(n_units, d_lowrank)
if sparsity_type == 'einsum':
return EinsumDense(d_input, n_units, use_bias=use_bias)
if sparsity_type == 'local':
assert use_bias # use_bias = False is unsupported
assert n_units % sparsity == 0
    return LocallyConnectedDense(sparsity, n_units // sparsity)
if sparsity_type == 'local3':
assert use_bias # use_bias = False is unsupported
assert n_units % sparsity == 0
    return LocallyConnectedDense(sparsity, n_units // sparsity, kernel_size=3)
raise ValueError('Unknown sparsity type: {}'.format(sparsity_type))
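# Example configuration (illustrative): SparseDenseWithOptions(
#     1024, d_input=512, sparsity_type='mult', sparsity=16)
# returns FactoredDense(16, 512, 1024), while sparsity_type=None (or sparsity=0)
# falls back to a plain tl.Dense(1024).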
@assert_shape('bld->bld')
def LowRankCausalAttention(d_feature, n_heads=1, dropout=0.0,
max_inference_length=2048, lowrank=64,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses low-rank approximation of kernel in Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
lowrank: The rank of low-rank approximation.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
return tl.ConfigurableAttention(
LowRankDense(d_feature, lowrank), LowRankDense(d_feature, lowrank),
LowRankDense(d_feature, lowrank), LowRankDense(d_feature, lowrank),
n_heads=n_heads, qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode))
@assert_shape('...a->...b')
def FactoredDense(n_modules, d_in, d_out, use_bias=True, use_bfloat16=False):
r"""Returns a Dense-like layer, internally factored to use fewer parameters.
This layer treats an activation vector as if divided into :math:`M`
subvectors (``n_modules`` 'modules'). It uses this factored view to compute
a :py:class:`Dense`-like mapping with high mixing/connectivity, but using
approximately :math:`1/M` the number of weights of a similarly dimensioned
:py:class:`Dense` layer.
  More specifically, each activation vector of dimensionality ``d_in`` is
  multiplied element-wise (a generalized form of gating) with ``n_modules``
  vectors also of dimensionality ``d_in``. The resulting vectors are projected
to the subvector/module dimensionality ``d_out / n_modules`` via a matrix
multiply, and finally reshaped back to a single vector of dimensionality
``d_out``. Optionally, a bias vector of dimensionality ``d_out`` is added at
the end. All the above-mentioned non-input objects -- gating vectors,
projection matrix, and optional bias -- are trainable weights.
Args:
n_modules: Number by which an activation vector is divided into subvectors
(modules) for the factored computation.
d_in: Last/innermost dimension of input array.
d_out: Last/innermost dimension of output array.
use_bias: If True, add bias vectors at the end of the layer; else end the
layer with the matrix multiply.
use_bfloat16: If True, use bfloat16 weights; else use float32 weights.
"""
if d_out % n_modules != 0:
raise ValueError(f'Value d_out ({d_out}) must be a multiple of arg '
f'n_modules ({n_modules}).')
d_module = d_out // n_modules
def GatingVectors():
return tl.Weights(init.RandomNormalInitializer(stddev=0.5),
shape=[n_modules, d_in],
use_bfloat16=use_bfloat16)
def ProjectionMatrix():
return tl.Weights(init.GlorotUniformInitializer(),
shape=[d_in, d_module],
                      use_bfloat16=use_bfloat16)
def Bias():
return tl.Weights(init.RandomNormalInitializer(1e-6),
shape=[d_out],
                      use_bfloat16=use_bfloat16)
layers = [
GatingVectors(),
ProjectionMatrix(),
_GateAndProject(),
MergeLastTwoAxes(),
]
if use_bias:
layers += [Bias(), tl.Add()]
return tl.Serial(layers)
def _GateAndProject():
"""Returns a combined gating+projection layer that saves on memory."""
def f(projection, gating, x):
# Args arrive in reverse order because of how they were put on the stack.
# Einsum indices: d (d_in), n (n_modules), m (d_module = d_out/n_modules)
return jnp.einsum('...d,nd,dm->...nm', x, gating, projection)
return tl.Fn('_GateAndProject', f)
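# Weight-count sketch for FactoredDense (illustrative): with d_in = d_out = 1024
# and n_modules = 16, the gating vectors hold 16 * 1024 weights, the shared
# projection holds 1024 * 64 weights and the optional bias 1024, about 83K in
# total versus roughly 1.05M for a plain Dense(1024).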
@assert_shape('...a->...a')
def MultiplicativeModularSparseDense(sparsity, d_feature):
  Returns a replacement for the Dense layer which uses fewer parameters.
The layer uses number of modules equal to `sparsity`. It is a combination of
multiplicative dense and locally connected dense layers.
Args:
sparsity: The sparsity of the layer; the output vector is divided into this
number of modules.
d_feature: Dimensionality of input and output tensor.
"""
assert d_feature % sparsity == 0
d_module = d_feature // sparsity
return tl.Serial(
# Weight below is used for per-head preprocessing of an embedding.
tl.Weights(init.RandomNormalInitializer(stddev=0.5),
shape=[sparsity, d_feature]),
# Weight below is a kernel of multiplicative dense, shared across heads.
tl.Weights(init.GlorotUniformInitializer(), [d_feature, d_module]),
# Weight below is a kernel of modular dense.
tl.Weights(functools.partial(init.GlorotUniformInitializer(),
nonreceptive_dims=[0]),
[sparsity, d_module, d_module]),
# To save memory the per-head preprocessing and multiplying by
# kernels is done in a single einsum.
tl.Fn('SparseDenseEinsum',
(lambda kmod, kmult, multiplier, embeds: # pylint: disable=g-long-lambda
jnp.einsum('hxo,dx,hd,...d->...ho', kmod, kmult, multiplier, embeds
))),
MergeLastTwoAxes(),
# Weight below is bias after dense, per-head.
tl.Weights(init.RandomNormalInitializer(1e-6), [d_feature]),
tl.Add(),
)
@assert_shape('bld->bld')
def MultiplicativeCausalAttention(d_feature, n_heads=1, sparsity=None,
dropout=0.0, max_inference_length=2048,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it multiplies each embedding
dimension by a scalar specific to each dimension and each head; then it
produces Q/K/V by applying the same dense layer to each head. In comparison
  to a standard dense layer for computing Q/K/V, this layer uses fewer
  parameters while still being able to express many functions, such as a
  permutation.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
sparsity = n_heads if sparsity is None else sparsity
return tl.ConfigurableAttention(
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
n_heads=n_heads, qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode))
@assert_shape('bld->bld')
def MultiplicativeModularCausalAttention(
d_feature, n_heads=1, sparsity=None, dropout=0.0, max_inference_length=2048,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
  a FactoredDense layer with a LocallyConnectedDense layer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
sparsity = n_heads if sparsity is None else sparsity
return tl.ConfigurableAttention(
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode))
@assert_shape('bld->bld')
def MultiplicativeConvCausalAttention(
d_feature, n_heads=1, sparsity=None, length_kernel_size=3, dropout=0.0,
force_no_dropout=False, max_inference_length=2048, share_qk=False,
output_layer_type='none', v_concat_type='none', mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
  a FactoredDense layer with a LocallyConvDense layer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
length_kernel_size: Size of convolution kernel on the length dimension.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
force_no_dropout: If True, force dropout to be 0.0 independent of the above
value; used to override some configurations.
max_inference_length: maximum length for inference.
share_qk: if True, average Q and K embeddings and share for both Q and K.
output_layer_type: Which sparse layers to use for processing output from the
attention mechanism. One of `'none'`, `'mult'`, `'conv'`,
or `'multconv'`.
v_concat_type: What kind of concatenation to use when computing V tensor.
One of `'original'`, `'fixed'`, or `'none'`. `'none'` means using just
      output from the multiplicative layer shared by Q, K, V. `'fixed'` means
using output from multiplicative layer concatenated, for each module,
with the layer input. `'original'` means using concatenation without
properly taking modules into account; this method was used in
experiments previously, so it is included for backwards-compatibility.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
assert output_layer_type in ['none', 'mult', 'conv', 'multconv']
assert v_concat_type in ['original', 'fixed', 'none']
dropout = 0.0 if force_no_dropout else dropout
sparsity = n_heads if sparsity is None else sparsity
d_module = d_feature // sparsity
output_layers = []
if 'mult' in output_layer_type:
output_layers.append(FactoredDense(
sparsity, d_feature, d_feature))
if 'conv' in output_layer_type:
output_layers.append(LocallyConvDense(
sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size))
if v_concat_type == 'original':
    # `'original'` uses concatenation without properly taking modules into
# account; this method was used in experiments previously, so it is included
# for backwards-compatibility.
concat_layers = [tl.Concatenate()] # use permuted and original for v
elif v_concat_type == 'fixed':
# `'fixed'` uses the output from multiplicative layer concatenated, for each
    # module, with the layer input. This means that every module in the Conv
    # layer has access both to the parts of the embedding which were used to
    # compute Q/K of this particular module, and to the parts of the embedding
    # which will be modified by this module.
concat_layers = [
tl.Parallel(
tl.Fn('Reshape1', lambda x: jnp.reshape( # pylint: disable=g-long-lambda
x, (x.shape[0], x.shape[1], sparsity, d_module))),
tl.Fn('Reshape2', lambda x: jnp.reshape( # pylint: disable=g-long-lambda
x, (x.shape[0], x.shape[1], sparsity, d_module)))),
tl.Concatenate(),
tl.Fn('Reshape3',
lambda x: jnp.reshape(x, (x.shape[0], x.shape[1], 2*d_feature))),
]
elif v_concat_type == 'none':
    # `'none'` doesn't use concatenation: we throw away the original layer
    # input and pass to Conv only the output of the shared Multiplicative layer.
concat_layers = [tl.Select([0], n_in=2)]
if share_qk:
return tl.Serial(
tl.Select([0, 0]), # pre-qkv, pre-v-for-concat
FactoredDense(sparsity, d_feature, d_feature), # shared q k
tl.Select([0, 0]), # pre-qk, pre-v, pre-v-for-concat
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads),
tl.Select([0, 0]), # use for q and k
tl.Parallel(
[],
[],
[concat_layers,
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
),
tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode),
tl.MergeHeads(n_heads),
output_layers,
)
return tl.Serial(
tl.Select([0, 0]), # duplicate activations
FactoredDense(sparsity, d_feature, d_feature), # shared q, k
tl.Select([0, 0, 0]), # use for q, k, v
tl.Parallel(
[LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
[LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
[concat_layers,
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
),
tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode),
tl.MergeHeads(n_heads),
output_layers,
)
class FavorAttention(base.Layer):
"""Implements FAVOR+ attention.
Original paper: https://arxiv.org/abs/2006.03555
The layer expects 4 inputs: (Q, K, V, MASK), and returns two outputs:
(RENORMALIZED_ATTENTION, MASK).
Attributes:
d_feature: Dimensionality of feature embedding.
n_heads: Number of attention heads.
n_random_features: Free dimension size for the orthogonal random matrix.
numerical_stabilizer: float, small number used for numerical stability.
use_approximate_softmax: Bool, if True uses approximate softmax, otherwise
Relu.
scale_by_norm: Boolean; whether to scale orthogonal random matrix.
normalize_data: predicate indicating whether data should be normalized.
epsilon: numerical stabilizer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
def __init__(self, d_feature=4, n_heads=1, n_random_features=256,
numerical_stabilizer=0.001,
use_approximate_softmax=False, scale_by_norm=True,
normalize_data=False,
epsilon=0.0001, mode='train'):
super().__init__(n_in=4, n_out=2)
self._d_feature = d_feature
self._n_heads = n_heads
self._n_random_features = n_random_features
self._numerical_stabilizer = numerical_stabilizer
self._mode = mode
self._use_approximate_softmax = use_approximate_softmax
self._normalize_data = normalize_data
self._epsilon = epsilon
if self._use_approximate_softmax:
rng = random.get_prng(0)
self._projection_matrix = self.get_2d_array(
rng=rng, n_rows=self._n_random_features,
n_columns=(self._d_feature // self._n_heads),
scale_by_norm=scale_by_norm,
normalize_data=normalize_data, epsilon=epsilon)
else:
self._projection_matrix = None
def nonnegative_softmax_kernel_feature_creator(self, x, is_query):
"""Constructs nonnegative kernel features for fast softmax attention.
Args:
x: input for which features are computed.
is_query: predicate indicating whether input data corresponds to
queries or keys.
Returns:
Random features for fast softmax attention.
"""
if self._normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(x.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(self._projection_matrix.shape[0])
# TODO(wgaj): Double-check... Should there be only one batch dimension...?
data_mod_shape = x.shape[0:1] + self._projection_matrix.shape
data_thick_random_matrix = (jnp.zeros(data_mod_shape) +
self._projection_matrix)
data_dash = jnp.einsum('Bij, Bkj -> Bik',
data_normalizer * x,
data_thick_random_matrix)
diag_data = jnp.square(x)
diag_data = jnp.sum(diag_data, axis=x.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=x.ndim - 1)
last_dims_t = (len(data_dash.shape) - 1,)
attention_dims_t = (1,)
if is_query:
data_dash = ratio * (
jnp.exp(data_dash - diag_data -
jnp.max(data_dash, axis=last_dims_t, keepdims=True)) +
self._epsilon)
else:
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(
data_dash, axis=last_dims_t + attention_dims_t, keepdims=True)) +
self._epsilon)
return data_dash
@staticmethod
def get_2d_array(rng, n_rows=256, n_columns=0, scale_by_norm=True,
normalize_data=False, epsilon=0.0001):
"""Generator for approximate softmax orthogonal kernel feature matrix.
Args:
rng: Random number generator.
n_rows: Number of rows.
n_columns: Number of columns.
scale_by_norm: Boolean; whether to scale orthogonal random matrix.
normalize_data: predicate indicating whether data should be normalized.
epsilon: numerical stabilizer.
Returns:
Orthogonal kernel feature matrix.
"""
n_full_blocks = int(n_rows / n_columns)
block_list = []
rng_key = rng
for _ in range(n_full_blocks):
rng, rng_input = random.split(rng)
unstructured_block = random.normal(rng_input, (n_columns, n_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = n_rows - n_full_blocks * n_columns
if remaining_rows > 0:
rng, rng_input = random.split(rng)
unstructured_block = random.normal(rng_input, (n_columns, n_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if scale_by_norm:
multiplier = jnp.linalg.norm(
random.normal(rng_key, (n_rows, n_columns)), axis=1)
else:
multiplier = jnp.sqrt(float(n_columns)) * jnp.ones((n_rows))
return jnp.matmul(jnp.diag(multiplier), final_matrix)
@staticmethod
def bidirectional_numerator(query_prime, key_prime, value):
kvs = jnp.einsum('lbm,lbd->bmd', key_prime, value)
return jnp.einsum('lbm,bmd->lbd', query_prime, kvs)
@staticmethod
def bidirectional_denominator(query_prime, key_prime):
all_ones = jnp.ones([query_prime.shape[0]])
ks_sum = jnp.einsum('lbm,l->bm', key_prime, all_ones)
return jnp.einsum('lbm,bm->lb', query_prime, ks_sum)
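  # The two einsums above implement the bidirectional linear-attention form of
  # FAVOR: instead of softmax(Q K^T) V, the output is approximated as
  # Q' ((K')^T V) renormalized by Q' ((K')^T 1), with Q', K' the nonnegative
  # feature maps; the cost is linear rather than quadratic in sequence length.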
@staticmethod
def relu(x):
return jnp.where(x <= 0, jnp.zeros_like(x), x)
def forward(self, inputs):
query, key, value, mask = inputs
if self._use_approximate_softmax:
query_prime = self.nonnegative_softmax_kernel_feature_creator(query, True)
key_prime = self.nonnegative_softmax_kernel_feature_creator(key, False)
else:
query_prime = self.relu(query) + self._numerical_stabilizer
key_prime = self.relu(key) + self._numerical_stabilizer
mask_batch_1_length = jnp.reshape(
mask, [key.shape[0] // self._n_heads, 1, key.shape[1]]).astype(
jnp.float32)
mask_heads = mask_batch_1_length + jnp.zeros((1, self._n_heads, 1))
key_prime *= jnp.reshape(mask_heads, [key.shape[0], key.shape[1], 1])
w = self.bidirectional_numerator(jnp.moveaxis(query_prime, 1, 0),
jnp.moveaxis(key_prime, 1, 0),
jnp.moveaxis(value, 1, 0))
r = self.bidirectional_denominator(jnp.moveaxis(query_prime, 1, 0),
jnp.moveaxis(key_prime, 1, 0))
w = jnp.moveaxis(w, 0, 1)
r = jnp.moveaxis(r, 0, 1)
r = jnp.reciprocal(r)
r = jnp.expand_dims(r, len(r.shape))
renormalized_attention = w * r
return renormalized_attention, mask
def Favor(d_feature, n_heads=1, n_random_features=256, dropout=0.0,
numerical_stabilizer=0.001, use_approximate_softmax=False,
          scale_by_norm=False, normalize_data=False, epsilon=0.0001, mode='train'):
"""Returns a layer that maps (activations, mask) to (new_activations, mask).
See the FAVOR paper for details: https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
n_random_features: Free dimension size for the orthogonal random matrix.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
use_approximate_softmax: Bool, if True uses approximate softmax, otherwise
Relu.
scale_by_norm: Boolean; whether to scale orthogonal random matrix.
normalize_data: predicate indicating whether data should be normalized.
epsilon: numerical stabilizer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
del dropout # not implemented yet but needed in the API
return tl.ConfigurableAttention(
tl.Dense(d_feature), tl.Dense(d_feature), tl.Dense(d_feature),
tl.Dense(d_feature),
tl.FavorAttention(d_feature, n_heads, n_random_features,
numerical_stabilizer, use_approximate_softmax,
scale_by_norm, normalize_data, epsilon, mode),
n_heads=n_heads)
class CausalFavorAttention(base.Layer):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
causal attention, but using FAVOR fast attention as in the following paper:
https://arxiv.org/abs/2006.03555
Layer expects three inputs (Q, K, V), and returns one output
RENORMALIZED_ATTENTION.
Attributes:
numerical_stabilizer: float, small number used for numerical stability.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
def __init__(self, numerical_stabilizer=0.001, mode='train'):
super().__init__(n_in=3, n_out=1)
self._numerical_stabilizer = numerical_stabilizer
self._mode = mode
def forward(self, inputs):
def favor_numerator_fwd(init_prefix_sum_value,
query_prime, key_prime, value):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum('...m,...d->...md', k, v)
x_slice = jnp.einsum('...m,...md->...d', q, p)
return p, x_slice
p, w = fastmath.scan(body, init_prefix_sum_value,
(query_prime, key_prime, value))
return w, (p, query_prime, key_prime, value)
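    # The scan above is the causal prefix-sum form of FAVOR: it maintains
    # p_t = p_{t-1} + k'_t (x) v_t and emits out_t = q'_t . p_t, so every
    # position attends only to itself and earlier positions. The custom
    # backward pass below re-derives p by scanning in reverse instead of
    # storing all prefix sums, which saves memory.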
def favor_numerator_bwd(pqkv, w_ct):
p, qs, ks, vs = pqkv
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum('...d,...md->...m', x_ct, p)
p_ct += jnp.einsum('...d,...m->...md', x_ct, q)
k_ct = jnp.einsum('...md,...d->...m', p_ct, v)
v_ct = jnp.einsum('...md,...m->...d', p_ct, k)
p -= jnp.einsum('...m,...d->...md', k, v)
return (p, p_ct), (q_ct, k_ct, v_ct)
_, (qs_ct, ks_ct, vs_ct) = fastmath.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, w_ct), reverse=True)
return (None, qs_ct, ks_ct, vs_ct)
def favor_numerator(init_prefix_sum_value, query_prime,
key_prime, value):
w, _ = favor_numerator_fwd(init_prefix_sum_value,
query_prime, key_prime, value)
return w
favor_numerator = fastmath.custom_vjp(
favor_numerator, favor_numerator_fwd, favor_numerator_bwd)
def favor_denominator_fwd(init_prefix_sum_value,
query_prime, key_prime):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum('...m,...m->...', q, p)
return p, x
p, r = fastmath.scan(body, init_prefix_sum_value, (query_prime,
key_prime))
return r, (query_prime, key_prime, p)
def favor_denominator_bwd(qkp, r_ct):
qs, ks, p = qkp
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p)
p_ct += jnp.einsum('...,...m->...m', x_ct, q)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
_, (qs_ct, ks_ct) = fastmath.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, r_ct), reverse=True)
return (None, qs_ct, ks_ct)
def favor_denominator(init_prefix_sum_value, query_prime,
key_prime):
r, _ = favor_denominator_fwd(init_prefix_sum_value,
query_prime, key_prime)
return r
favor_denominator = fastmath.custom_vjp(
favor_denominator, favor_denominator_fwd, favor_denominator_bwd)
favor_denominator.defvjp(favor_denominator_fwd, favor_denominator_bwd)
def relu(x):
return jnp.where(x <= 0, jnp.zeros_like(x), x)
query, key, value = inputs
query_prime = relu(query) + self._numerical_stabilizer
key_prime = relu(key) + self._numerical_stabilizer
prefix_sum_tensor_shape = (key.shape[0], key.shape[-1], value.shape[-1])
t_slice_shape = (key.shape[0], key.shape[-1])
init_prefix_sum_value_numerator = jnp.zeros(prefix_sum_tensor_shape)
init_prefix_sum_value_denominator = jnp.zeros(t_slice_shape)
w = favor_numerator(init_prefix_sum_value_numerator,
jnp.moveaxis(query_prime, 1, 0),
jnp.moveaxis(key_prime, 1, 0),
jnp.moveaxis(value, 1, 0))
r = favor_denominator(init_prefix_sum_value_denominator,
jnp.moveaxis(query_prime, 1, 0),
jnp.moveaxis(key_prime, 1, 0))
w = jnp.moveaxis(w, 0, 1)
r = jnp.moveaxis(r, 0, 1)
r = jnp.reciprocal(r)
r = jnp.expand_dims(r, len(r.shape))
renormalized_attention = w * r
return renormalized_attention
def CausalFavor(d_feature, n_heads=1, dropout=0.0,
numerical_stabilizer=0.001, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
causal attention, but using FAVOR fast attention as in the following paper:
https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
    dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
del dropout
return tl.ConfigurableAttention(
core.Dense(d_feature), core.Dense(d_feature), core.Dense(d_feature),
core.Dense(d_feature), n_heads=n_heads,
qkv_attention_layer=tl.CausalFavorAttention(numerical_stabilizer,
mode))
class _RememberInReverse(base.Layer):
"""Layer remembering the input in forward pass. For reversible models."""
def __init__(self, output=True):
"""Layer remembering the input in forward pass. For reversible models.
During the first pass through the model this layer saves the input as
state, and returns the input unmodified. During the second pass through the
model the layer outputs the input from the first pass. This is used to
combat numerical stability problems in Terraformer. It doesn't do anything
in non-reversible models.
Args:
output: Whether to pass the input or not.
"""
n_out = 1 if output else 0
self._output = output
super().__init__(name='_RememberInReverse', n_out=n_out)
def forward(self, x):
if 'running_second_time_yes' in self.state[1]:
result = self.state[0]
else:
result = x
self.state = (x, {'running_second_time': ()})
if self._output:
return result
else:
return tuple()
def init_weights_and_state(self, input_signature):
"""Initializes this layer's weights."""
if isinstance(input_signature, (list, tuple)):
input_signature = input_signature[0]
self.weights = ()
self.state = (jnp.zeros(input_signature.shape, dtype=jnp.int32),
{'running_second_time': ()})
class _RecallQuantMaskInReverse(base.Layer):
"""Layer recalling quant mask from specific _RememberInReverse.
  This layer is needed for memory-efficient training of a reversible model with
  ff chunking. During the forward pass it simply returns minus ones, which are
  ignored in the controller. During reverse_and_grad it returns a quant_mask
  which was memorized (saved to state) by a _RememberInReverse layer.
  This enables us to save the quant_mask right after chunking and load it again
  (when reversing) right before chunking.
"""
def __init__(self, remember_layer, elements):
self._remember_layer = remember_layer
self._elements = elements
super().__init__(name='_RecallQuantMaskInReverse', n_in=1, n_out=2)
def forward(self, x):
if (self._remember_layer.state and
'running_second_time_yes' in self._remember_layer.state[1]):
# It's reverse_and_grad, so we pull the quant_mask from remembering layer.
result = self._remember_layer.state[0]
else:
result = -jnp.ones((x.shape[0], self._elements), dtype=jnp.int32)
return (x, result)
class _SparseFFController(base.Layer):
"""The controller part of Sparse Feed-Forward layer."""
def __init__(self, d_ff, n_elements_in_block, d_lowrank, temperature,
use_bfloat16, mode, kernel_initializer, bias_initializer,
also_return_nondiscrete_output):
"""Returns a sparse feed-forward block."""
n_out = 2 if also_return_nondiscrete_output else 1
super().__init__(name=f'_SparseFFController_{d_ff}', n_in=2, n_out=n_out)
self._use_bfloat16 = use_bfloat16
self._d_ff = d_ff
self._d_lowrank = d_lowrank
# Q: what temperature is actually most useful in training?
self._temperature = temperature if mode == 'train' else 0.0
self._mode = mode
self._n_elements_in_block = n_elements_in_block
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
# Helper numbers as d_ff will be divided by n_elements_in_block.
assert self._d_ff % self._n_elements_in_block == 0
self._d1 = self._d_ff // self._n_elements_in_block
self._d2 = self._n_elements_in_block
self._also_return_nondiscrete_output = also_return_nondiscrete_output
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input.
"""
x, recalled_quant_mask = x
m1, m2, mb = self.weights
x_shape = x.shape
x = jnp.reshape(x, [-1, x_shape[-1]]) # Easier to operate on flattened x.
# Q: should we add bias and/or put relu after the low-rank m1 dot?
# Replacing multiplication and reshape by this einsum brings training speed
# improvement (see also reshape in initialization).
mask_logits = jnp.einsum('bd,dl,lxy->bxy', x, m1, m2) + mb
if self._also_return_nondiscrete_output:
# Softmax.
mask_logsumexp = fastmath.logsumexp(mask_logits, axis=-1, keepdims=True)
log_mask = mask_logits - mask_logsumexp
mask = jnp.exp(log_mask)
# Gumbel-softmax with straight-through discretization.
if self._temperature == 0.0:
quant_mask = jnp.argmax(log_mask, axis=-1)
else:
u = fastmath.random.uniform(self.rng, mask.shape, jnp.float32, 1e-6,
1.0 - 1e-6)
g = -jnp.log(-jnp.log(u))
quant_mask = jnp.argmax(log_mask + g * self._temperature, axis=-1)
else:
quant_mask = jnp.argmax(mask_logits, axis=-1)
if self._mode == 'train':
      # We use recalled_quant_mask where it differs from -1; otherwise
      # we use the quant_mask which we have just computed.
quant_mask = jnp.where(recalled_quant_mask == -1,
quant_mask, recalled_quant_mask)
if self._also_return_nondiscrete_output:
return quant_mask, mask
else:
return quant_mask
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights."""
x_input_signature = input_signature[0]
d_model = x_input_signature.shape[-1]
shape_m1 = (d_model, self._d_lowrank)
shape_m2 = (self._d_lowrank, self._d_ff)
shape_mb = (self._d_ff,)
rng_m1, rng_m2, rng_mb = fastmath.random.split(self.rng, 3)
m1 = self._kernel_initializer(shape_m1, rng_m1)
m2 = self._kernel_initializer(shape_m2, rng_m2)
mb = self._bias_initializer(shape_mb, rng_mb)
if self._use_bfloat16:
m1 = m1.astype(jnp.bfloat16)
m2 = m2.astype(jnp.bfloat16)
mb = mb.astype(jnp.bfloat16)
# Reshapes below, with einsum in feedforward, improve the training speed.
m2 = jnp.reshape(m2, [self._d_lowrank, self._d1, self._d2])
mb = jnp.reshape(mb, [self._d1, self._d2])
self.weights = (m1, m2, mb)
class _SparseFFMain(base.Layer):
"""The main (non-controller) part of Sparse Feed-Forward layer."""
def __init__(self, d_ff, n_elements_in_block, d_lowrank, quant_prob,
use_bfloat16, big_weights_in_bfloat16, mode, kernel_initializer,
bias_initializer, multiply_by_controller_output, kernel_scaling):
"""Returns a sparse feed-forward block."""
n_in = 3 if mode == 'train' or multiply_by_controller_output else 2
super().__init__(name=f'_SparseFFMain_{d_ff}', n_in=n_in, n_out=2)
self._mode = mode
self._use_bfloat16 = use_bfloat16
self._big_weights_in_bfloat16 = big_weights_in_bfloat16
self._d_ff = d_ff
self._d_lowrank = d_lowrank
self._quant_prob = quant_prob
self._n_elements_in_block = n_elements_in_block
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
# Helper numbers as d_ff will be divided by n_elements_in_block.
assert self._d_ff % self._n_elements_in_block == 0
self._d1 = self._d_ff // self._n_elements_in_block
self._d2 = self._n_elements_in_block
self._multiply_by_controller_output = multiply_by_controller_output
self._kernel_scaling = kernel_scaling
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input.
"""
if self._mode == 'train' or self._multiply_by_controller_output:
quant_mask, mask, x = x
else:
quant_mask, x = x
original_quant_mask = quant_mask
w1, w2, b2 = self.weights
if self._mode == 'predict':
w1 = jnp.transpose(w1, (1, 2, 0)) # dm, d1, d2 -> d1, d2, dm
w2 = jnp.transpose(w2, (1, 0, 2)) # d2, d1, dm -> d1, d2, dm
x_shape = x.shape
x = jnp.reshape(x, [-1, x_shape[-1]]) # Easier to operate on flattened x.
if self._mode == 'train':
# Tricks from Section 2.1 in https://arxiv.org/abs/1801.09797
quant_mask = tl.one_hot(quant_mask, self._n_elements_in_block)
quant_mask = fastmath.stop_gradient(quant_mask)
quant_mask += mask - fastmath.stop_gradient(mask) # straight-through
# We will sometimes (quant_prob of the batches) use the soft-mask instead
# of the quantized mask to improve training stability (see paper above).
select = fastmath.random.uniform(self.rng, (), jnp.float32, 0.0, 1.0)
quant_mask = jnp.where(select < self._quant_prob, quant_mask, mask)
# In training, run full matmul to get benefits from the above tricks.
mid = jnp.einsum('bd,dxy->bxy', x, w1) * quant_mask
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
if self._multiply_by_controller_output:
# We multiply only for quantized decisions, since for non-quantized
# decisions we've already multiplied the output.
mask_mult = jnp.where(select < self._quant_prob,
mask, jnp.ones_like(mask))
# Stop-gradient is here, because we already have a pass-through gradient
# (for quantized decisions).
mask_mult = fastmath.stop_gradient(mask_mult)
relu = relu * mask_mult
res = jnp.einsum('bxy,yxd->bd', relu, w2) + b2
elif self._mode == 'predict':
      # This implementation mimics inference. It's not efficient for a large
      # joint_batch, but at inference time that will be 1 most of the time.
# Shapes:
# quant_mask is [joint_batch, self._d1]
# w1 is [d_model, self._d1, self._d2]
      # we'll index w1 with advanced numpy indexing: the first index ranges
      # over self._d1 (tiled batch_size times), the second index is quant_mask
batch_size = quant_mask.shape[0]
idx1 = jnp.array([jnp.arange(self._d1)] * batch_size)
# flatten indices and select from w1
idx1 = jnp.reshape(idx1, [-1])
idx2 = jnp.reshape(quant_mask, [-1])
w = w1[idx1, idx2, :] # now we have per-element weights with batch dim
w = jnp.reshape(w, [batch_size, self._d1, -1])
mid = jnp.einsum('ai,aji->aj', x, w)
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
if self._multiply_by_controller_output:
mask_mult = jnp.take_along_axis(mask, quant_mask[..., None], -1)[..., 0]
relu = relu * mask_mult
# w2 is [self._d1, self._d2, d_model]
v = w2[idx1, idx2, :]
v = jnp.reshape(v, [batch_size, self._d1, -1])
res = jnp.einsum('ai,aij->aj', relu, v) + b2
else:
quant_mask = tl.one_hot(quant_mask, self._n_elements_in_block)
mid = jnp.einsum('bd,dxy->bxy', x, w1) * quant_mask
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
if self._multiply_by_controller_output:
relu = relu * mask
res = jnp.einsum('bxy,yxd->bd', relu, w2) + b2
return original_quant_mask, jnp.reshape(res, x_shape)
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights."""
d_model = input_signature[-1].shape[-1]
shape_w1 = (d_model, self._d_ff)
shape_w2 = (self._d_ff, d_model)
shape_b2 = (d_model,)
rng_w1, rng_w2, rng_b2 = fastmath.random.split(self.rng, 3)
if base.N_WEIGHTS_SHARDS > 1:
# In sharded-weights mode, put the weights on CPU on init
# as they will be sharded later.
w1 = tl.on_cpu(self._kernel_initializer(shape_w1, rng_w1))
w2 = tl.on_cpu(self._kernel_initializer(shape_w2, rng_w2))
else:
w1 = self._kernel_initializer(shape_w1, rng_w1)
w2 = self._kernel_initializer(shape_w2, rng_w2)
b2 = self._bias_initializer(shape_b2, rng_b2)
if self._use_bfloat16:
b2 = b2.astype(jnp.bfloat16)
if self._use_bfloat16 or self._big_weights_in_bfloat16:
w1 = w1.astype(jnp.bfloat16)
w2 = w2.astype(jnp.bfloat16)
w1 = jnp.reshape(w1, (-1, self._d1, self._d2))
w2 = jnp.reshape(w2, (self._d2, self._d1, -1))
if self._kernel_scaling:
# This keeps expected variance of the output regardless of N.
w2 = w2 * (self._n_elements_in_block ** 0.5)
self.weights = (w1, w2, b2)
def SparseFF(
d_ff, n_elements_in_block=32, d_lowrank=64, temperature=0.1, quant_prob=0.3,
use_bfloat16=False, big_weights_in_bfloat16=False, mode='train',
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
dropout_rate=0.0, dropout_shared_axes=None, ff_chunk_size=0,
multiply_by_controller_output=False, kernel_scaling=False):
"""Returns Feed-forward block with sparsity.
The original (non-sparse) FF block is a triple Dense(d_ff)-Relu-Dense
that takes an input, makes it of size d_ff (usually larger than it was) and
then brings it back to the original size after Relu. It is commonly used in
Transformer models where it often accounts for most of the trainable weights.
The original block can be slow in decoding due to the need to fetch a lot of
  weights from memory. This sparse block allows only one non-zero element
  in each block of a specified size. This is trained with the straight-through
  Gumbel-softmax trick.
Args:
d_ff: Depth/dimensionality of FeedForward layer.
n_elements_in_block: The sparsity level. The layer is divided into blocks of
this size, and each block has only a single element active.
d_lowrank: The dimensionality of low-rank controller.
temperature: The temperature of the controller during training.
quant_prob: During training this proportion of blocks will have quantized
mask (i.e. a single element active). The rest will use a soft mask.
use_bfloat16: Whether to use bfloat16 for weights.
    big_weights_in_bfloat16: Whether to use bfloat16 for the main weights of the
FeedForward layer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
dropout_rate: Probability for dropping an activation value.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks.
multiply_by_controller_output: whether to multiply the middle activation
layer of FF by controller output (i.e. softmax).
kernel_scaling: Whether to scale the kernel matrix (during init) to keep the
variance of the layer output regardless of n_elements_in_block.
"""
if mode == 'train' or multiply_by_controller_output:
also_return_nondiscrete_output = True
else:
also_return_nondiscrete_output = False
controller = _SparseFFController(
d_ff=d_ff, n_elements_in_block=n_elements_in_block,
d_lowrank=d_lowrank, temperature=temperature,
use_bfloat16=use_bfloat16, mode=mode,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
also_return_nondiscrete_output=also_return_nondiscrete_output)
main = [
_SparseFFMain(
d_ff=d_ff, n_elements_in_block=n_elements_in_block,
d_lowrank=d_lowrank, quant_prob=quant_prob, use_bfloat16=use_bfloat16,
big_weights_in_bfloat16=big_weights_in_bfloat16, mode=mode,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
multiply_by_controller_output=multiply_by_controller_output,
kernel_scaling=kernel_scaling),
# quant_mask, emb
tl.Select([1, 0]),
# emb, quant_mask
tl.Dropout(rate=dropout_rate, shared_axes=dropout_shared_axes, mode=mode),
tl.Select([1, 0]),
# quant_mask, emb
]
# We will "remember" quant_mask _after_ chunking, and "recall" this same
# quant_mask during reverse_and_grad _before_ chunking.
remembering = _RememberInReverse(output=False)
recalling = _RecallQuantMaskInReverse(
remember_layer=remembering, elements=d_ff//n_elements_in_block)
return tl.BatchLeadingAxes(tl.Serial(
recalling, # emb, quant_mask
tl.Chunk(chunk_size=ff_chunk_size, layer=tl.Serial(
# emb, quant_mask
tl.Select((0, 1, 0)), # emb, quant_mask, emb
controller, # quant_mask, mask, emb
main, # quant_mask, emb/output
)),
remembering, # emb/output
))
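# Example usage (illustrative): SparseFF(d_ff=4096, n_elements_in_block=32,
# d_lowrank=64, mode='train') can replace the Dense(4096)-Relu-Dense block of a
# Transformer feed-forward layer; at prediction time only one element per block
# (4096 / 32 = 128 hidden units in total) is active for each token.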
class BlockSparseFF(base.Layer):
"""Feed-forward block with block sparsity.
The original (non-sparse) FF block is a triple Dense(d_ff)-Relu-Dense
that takes an input, makes it of size d_ff (usually larger than it was) and
then brings it back to the original size after Relu. It is commonly used in
Transformer models where it often accounts for most of the trainable weights.
  This block-sparse layer mimics a mixture-of-experts architecture. It divides
  the d_ff dimension of each weight matrix into a number of blocks equal to
  n_experts and activates only one non-zero block of the weight matrix.
  This is trained with the straight-through Gumbel-softmax trick.
"""
def __init__(self,
d_ff,
n_experts=64,
temperature=0.7,
mode='train',
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6)):
"""Returns a block sparse feed-forward block."""
super().__init__(name=f'BlockSparseFF_{d_ff}')
self._mode = mode
self._d_ff = d_ff
self._n_experts = n_experts
self._temperature = temperature if mode == 'train' else 0.0
self._n_elements_in_block = d_ff // n_experts
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
assert self._d_ff % self._n_experts == 0
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input.
"""
m1, w1, w2, b2 = self.weights
x_shape = x.shape
x = jnp.reshape(x, [-1, x_shape[-1]]) # Easier to operate on flattened x.
# Q: check if we need bias and/or put relu after the m1 dot?
mask_logits = jnp.dot(x, m1)
# Softmax.
mask_logsumexp = fastmath.logsumexp(mask_logits, axis=-1, keepdims=True)
log_mask = mask_logits - mask_logsumexp
mask = jnp.exp(log_mask)
# Gumbel-softmax with straight-through discretization.
# TODO(lukaszkaiser, chowdhery): Extract this block and share
rng1, rng2 = fastmath.random.split(self.rng, 2)
u = fastmath.random.uniform(rng1, mask.shape, jnp.float32, 1e-6, 1.0 - 1e-6)
g = -jnp.log(-jnp.log(u))
selected_experts = jnp.argmax(log_mask + g * self._temperature, axis=-1)
if self._mode == 'train':
# Tricks from Section 2.1 in https://arxiv.org/abs/1801.09797
quant_mask = tl.one_hot(selected_experts, self._n_experts)
quant_mask = fastmath.stop_gradient(quant_mask)
quant_mask += mask - fastmath.stop_gradient(mask) # straight-through
# We will sometimes (50% of the batches) use the soft-mask instead of
# the quantized mask to improve training stability (see the paper above).
# Q: is selecting 50% of batches the best? Other %? Mixed in-batch?
select = fastmath.random.uniform(rng2, (), jnp.float32, -1.0, 1.0)
quant_mask = jnp.where(select > 0.0, quant_mask, mask)
else:
quant_mask = tl.one_hot(selected_experts, self._n_experts)
quant_mask = jnp.reshape(quant_mask, [-1, self._n_experts, 1])
batch_size = quant_mask.shape[0]
if self._mode == 'predict' and batch_size == 1:
      # This implementation mimics inference for batch_size 1.
start_idx = selected_experts[0] * self._n_elements_in_block
# w1 is [d_model, d_ff], w is [d_model, n_elements_in_block]
w = fastmath.dynamic_slice(w1, [0, start_idx],
[w1.shape[0], self._n_elements_in_block])
mid = jnp.dot(x, w)
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
# w2 is [d_ff, d_model], v is [n_elements_in_block, d_model]
v = fastmath.dynamic_slice(w2, [start_idx, 0],
[self._n_elements_in_block, w2.shape[-1]])
v = jnp.reshape(v, [self._n_elements_in_block, -1])
res = jnp.dot(relu, v) + b2
else:
expanded_mask = jnp.broadcast_to(
quant_mask,
(quant_mask.shape[0], quant_mask.shape[1], self._n_elements_in_block))
expanded_mask = jnp.reshape(expanded_mask, (-1, self._d_ff))
mid = jnp.dot(x, w1) * expanded_mask # [joint_batch, d_ff]
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
res = jnp.dot(relu, w2) + b2
return jnp.reshape(res, x_shape) # un-flatten if needed
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights."""
d_model = input_signature.shape[-1]
shape_m1 = (d_model, self._n_experts)
shape_w1 = (d_model, self._d_ff)
shape_w2 = (self._d_ff, d_model)
shape_b2 = (d_model,)
rng_m1, rng_w1, rng_w2, rng_b2 = fastmath.random.split(self.rng, 4)
m1 = self._kernel_initializer(shape_m1, rng_m1)
w1 = self._kernel_initializer(shape_w1, rng_w1)
w2 = self._kernel_initializer(shape_w2, rng_w2)
b2 = self._bias_initializer(shape_b2, rng_b2)
self.weights = (m1, w1, w2, b2)
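# Example (illustrative): BlockSparseFF(d_ff=4096, n_experts=64) partitions the
# 4096 hidden units into 64 expert blocks of 64; in predict mode with batch
# size 1 only a (d_model, 64) slice of w1 and a (64, d_model) slice of w2 are
# read for the selected expert.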
class SwitchSparseFF(base.Layer):
"""Feed-forward block with switch-style block sparsity.
The original (non-sparse) FF block is a triple Dense(d_ff)-Relu-Dense
that takes an input, makes it of size d_ff (usually larger than it was) and
then brings it back to the original size after Relu. It is commonly used in
Transformer models where it often accounts for most of the trainable weights.
  This block-sparse layer mimics a mixture-of-experts architecture. It divides
  the d_ff dimension of each weight matrix into a number of blocks equal to
  n_experts and activates only one non-zero block of the weight matrix.
  This is trained with methods following the Switch Transformer.
"""
def __init__(self,
d_ff,
n_experts=64,
temperature=0.1,
mode='train',
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6)):
"""Returns a switch-style training block sparse feed-forward block."""
super().__init__(name=f'SwitchSparseFF_{d_ff}')
self._mode = mode
self._d_ff = d_ff
self._n_experts = n_experts
self._temperature = temperature if mode == 'train' else 0.0
self._n_elements_in_block = d_ff // n_experts
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
assert self._d_ff % self._n_experts == 0
def forward(self, x):
"""Executes this layer as part of a forward pass through the model.
Args:
x: Tensor of same shape and dtype as the input signature used to
initialize this layer.
Returns:
Tensor of same shape and dtype as the input.
"""
m1, w1, w2, b2 = self.weights
x_shape = x.shape
x = jnp.reshape(x, [-1, x_shape[-1]]) # Easier to operate on flattened x.
# Q: check if we need bias and/or put relu after the m1 dot?
mask_logits = jnp.dot(x, m1)
# Softmax.
mask_logsumexp = fastmath.logsumexp(mask_logits, axis=-1, keepdims=True)
log_mask = mask_logits - mask_logsumexp
mask = jnp.exp(log_mask)
# Gumbel noise to allow sampling from the softmax.
rng1, _ = fastmath.random.split(self.rng, 2)
u = fastmath.random.uniform(rng1, mask.shape, jnp.float32, 1e-6, 1.0 - 1e-6)
g = -jnp.log(-jnp.log(u))
selected_experts = jnp.argmax(log_mask + g * self._temperature, axis=-1)
quant_mask = tl.one_hot(selected_experts, self._n_experts)
quant_mask = fastmath.stop_gradient(quant_mask)
quant_mask *= mask # go to just the selected expert
quant_mask = jnp.reshape(quant_mask, [-1, self._n_experts, 1])
batch_size = quant_mask.shape[0]
if self._mode == 'predict' and batch_size == 1:
mask_flat = jnp.reshape(mask, [-1, self._n_experts])
selected_flat = jnp.reshape(selected_experts, [-1])
selected_mask_flat = mask_flat[np.arange(selected_flat.size),
selected_flat]
      # This implementation mimics inference for batch_size 1.
start_idx = selected_experts[0] * self._n_elements_in_block
# w1 is [d_model, d_ff], w is [d_model, n_elements_in_block]
w = fastmath.dynamic_slice(w1, [0, start_idx],
[w1.shape[0], self._n_elements_in_block])
mid = jnp.dot(x, w)
mid *= jnp.reshape(selected_mask_flat, mid.shape[:-1])[..., None]
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
# w2 is [d_ff, d_model], v is [n_elements_in_block, d_model]
v = fastmath.dynamic_slice(w2, [start_idx, 0],
[self._n_elements_in_block, w2.shape[-1]])
v = jnp.reshape(v, [self._n_elements_in_block, -1])
res = jnp.dot(relu, v) + b2
else:
expanded_mask = jnp.broadcast_to(
quant_mask,
(quant_mask.shape[0], quant_mask.shape[1], self._n_elements_in_block))
expanded_mask = jnp.reshape(expanded_mask, (-1, self._d_ff))
mid = jnp.dot(x, w1) * expanded_mask # [joint_batch, d_ff]
relu = jnp.where(mid <= 0, jnp.zeros_like(mid), mid)
res = jnp.dot(relu, w2) + b2
return jnp.reshape(res, x_shape) # un-flatten if needed
def init_weights_and_state(self, input_signature):
"""Randomly initializes this layer's weights."""
d_model = input_signature.shape[-1]
shape_m1 = (d_model, self._n_experts)
shape_w1 = (d_model, self._d_ff)
shape_w2 = (self._d_ff, d_model)
shape_b2 = (d_model,)
rng_m1, rng_w1, rng_w2, rng_b2 = fastmath.random.split(self.rng, 4)
m1 = self._kernel_initializer(shape_m1, rng_m1)
w1 = self._kernel_initializer(shape_w1, rng_w1)
w2 = self._kernel_initializer(shape_w2, rng_w2)
b2 = self._bias_initializer(shape_b2, rng_b2)
self.weights = (m1, w1, w2, b2)
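# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A minimal way to exercise SwitchSparseFF, assuming the usual trax layer API
# (shapes.signature -> init -> call); shapes and hyper-parameters below are
# examples only.
#
#   import numpy as onp
#   from trax import shapes
#
#   layer = SwitchSparseFF(d_ff=256, n_experts=4, mode='train')
#   x = onp.zeros((2, 8, 64), dtype=onp.float32)   # (batch, time, d_model)
#   layer.init(shapes.signature(x))
#   y = layer(x)                                   # same shape and dtype as x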
| {
"content_hash": "a235e69b96e5b3bdb8502fe46519091f",
"timestamp": "",
"source": "github",
"line_count": 1718,
"max_line_length": 90,
"avg_line_length": 42.06461001164144,
"alnum_prop": 0.6515698728326899,
"repo_name": "google/trax",
"id": "1ac1c8ca4841b28f837327f376aad09ea76e1536",
"size": "72864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trax/layers/research/sparsity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2441406"
},
{
"name": "Python",
"bytes": "2582897"
},
{
"name": "Shell",
"bytes": "6619"
}
],
"symlink_target": ""
} |
import os
import ujson
from starlette import status
from starlette.concurrency import run_in_threadpool
from starlette.datastructures import UploadFile
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response
from polyaxon import settings
from polyaxon.constants.globals import DEFAULT_UPLOADS_PATH
from polyaxon.fs.async_manager import upload_dir, upload_file
from polyaxon.fs.types import FSSystem
from polyaxon.lifecycle import V1ProjectFeature
from polyaxon.utils.path_utils import check_or_create_path, delete_path, untar_file
async def handle_posted_data(
fs: FSSystem,
content_file: UploadFile,
root_path: str,
path: str,
upload: bool,
is_file: bool,
overwrite: bool = True,
untar: bool = True,
) -> str:
tmp_path = "{}/{}".format(
root_path, os.path.basename(content_file.filename)
).rstrip("/")
if path:
root_path = "{}/{}".format(root_path, path).rstrip("/")
if is_file:
root_path = "{}/{}".format(
root_path, os.path.basename(content_file.filename)
)
else:
if untar:
root_path = "{}/{}".format(root_path, DEFAULT_UPLOADS_PATH).rstrip("/")
else:
root_path = tmp_path
if not untar:
tmp_path = root_path
full_tmppath = settings.AGENT_CONFIG.get_local_path(
subpath=tmp_path, entity=V1ProjectFeature.RUNTIME
)
full_filepath = settings.AGENT_CONFIG.get_local_path(
subpath=root_path, entity=V1ProjectFeature.RUNTIME
)
if overwrite and os.path.exists(full_filepath):
delete_path(full_filepath)
if not overwrite and os.path.exists(full_filepath):
return full_filepath
# Always clean tmp path
if overwrite and os.path.exists(full_tmppath):
delete_path(full_tmppath)
check_or_create_path(full_tmppath, is_dir=False)
check_or_create_path(full_filepath, is_dir=not is_file)
# Creating the new file
with open(full_tmppath, "wb") as destination:
for chunk in content_file.file:
destination.write(chunk)
if untar:
await run_in_threadpool(
untar_file, full_tmppath, extract_path=full_filepath, use_filepath=False
)
if upload:
if is_file:
await upload_file(fs=fs, subpath=root_path)
else:
await upload_dir(fs=fs, subpath=root_path)
return root_path
async def handle_upload(fs: FSSystem, request: Request, is_file: bool) -> Response:
form = await request.form()
content_file = form["upload_file"] # type: UploadFile
content_json = form["json"] # type: str
content_json = ujson.loads(content_json) if content_json else {}
run_uuid = request.path_params["run_uuid"]
overwrite = content_json.get("overwrite", True)
untar = content_json.get("untar", True)
path = content_json.get("path", "")
try:
archived_path = await handle_posted_data(
fs=fs,
content_file=content_file,
root_path=run_uuid,
path=path,
upload=True,
is_file=is_file,
overwrite=overwrite,
untar=untar,
)
except Exception as e:
raise HTTPException(
detail="Run's artifacts upload was unsuccessful, "
"an error was raised while uploading the data %s." % e,
status_code=status.HTTP_400_BAD_REQUEST,
)
if not archived_path:
return Response(
content="Artifact not found and not uploaded: filepath={}".format(
archived_path
),
status_code=status.HTTP_404_NOT_FOUND,
)
return Response(status_code=status.HTTP_200_OK)
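# --- Hedged wiring sketch (illustrative, not from the original module) ---
# One way handle_upload could be mounted as a Starlette endpoint; `build_fs`
# is a hypothetical helper standing in for however the agent constructs its
# FSSystem, and the route path is an assumption.
#
#   from starlette.applications import Starlette
#   from starlette.routing import Route
#
#   async def upload_artifact(request: Request) -> Response:
#       fs = await build_fs()  # assumed to return an FSSystem
#       return await handle_upload(fs=fs, request=request, is_file=False)
#
#   app = Starlette(routes=[
#       Route("/runs/{run_uuid}/artifacts/upload", upload_artifact,
#             methods=["POST"]),
#   ])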
| {
"content_hash": "eb1afe139754f3dcdea8c6924d344c8f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 84,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.6342237503306004,
"repo_name": "polyaxon/polyaxon",
"id": "730725a8422d229dbacf9e15a93d59d37a5bc933",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploy/polyaxon_deploy/controllers/uploads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iom', '0037_auto_20161004_1147'),
]
operations = [
migrations.RemoveField(
model_name='cartodb',
name='layer_sql',
),
]
| {
"content_hash": "afb6af5c385f933571b3843f9ff163b7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 43,
"avg_line_length": 19.294117647058822,
"alnum_prop": 0.5853658536585366,
"repo_name": "acaciawater/iom",
"id": "6a0421c819e200e725fb227cbfc2ff6a95be4d98",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iom/migrations/0038_auto_20161004_1345.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24849"
},
{
"name": "Python",
"bytes": "141471"
}
],
"symlink_target": ""
} |
"""Event parser and human readable log generator."""
from datetime import timedelta
from itertools import groupby
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.components import sun
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_HIDDEN,
ATTR_NAME,
ATTR_SERVICE,
CONF_EXCLUDE,
CONF_INCLUDE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
EVENT_AUTOMATION_TRIGGERED,
EVENT_SCRIPT_STARTED,
HTTP_BAD_REQUEST,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, State, callback, split_entity_id
from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
from homeassistant.components.homekit.const import (
ATTR_DISPLAY_NAME,
ATTR_VALUE,
DOMAIN as DOMAIN_HOMEKIT,
EVENT_HOMEKIT_CHANGED,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_MESSAGE = "message"
CONF_DOMAINS = "domains"
CONF_ENTITIES = "entities"
CONTINUOUS_DOMAINS = ["proximity", "sensor"]
DOMAIN = "logbook"
GROUP_BY_MINUTES = 15
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
CONF_EXCLUDE: vol.Schema(
{
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
),
CONF_INCLUDE: vol.Schema(
{
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
ALL_EVENT_TYPES = [
EVENT_STATE_CHANGED,
EVENT_LOGBOOK_ENTRY,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_ALEXA_SMART_HOME,
EVENT_HOMEKIT_CHANGED,
EVENT_AUTOMATION_TRIGGERED,
EVENT_SCRIPT_STARTED,
]
LOG_MESSAGE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
}
)
@bind_hass
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
@bind_hass
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {ATTR_NAME: name, ATTR_MESSAGE: message}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
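# Illustrative call (entity names are made up): an integration could add its
# own entry with
#   async_log_entry(hass, "Front door", "was unlocked manually",
#                   domain="lock", entity_id="lock.front_door")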
async def async_setup(hass, config):
    """Set up the logbook component."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
hass.components.frontend.async_register_built_in_panel(
"logbook", "logbook", "hass:format-list-bulleted-type"
)
hass.services.async_register(DOMAIN, "log", log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = "/api/logbook"
name = "api:logbook"
extra_urls = ["/api/logbook/{datetime}"]
def __init__(self, config):
"""Initialize the logbook view."""
self.config = config
async def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message("Invalid datetime", HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
period = request.query.get("period")
if period is None:
period = 1
else:
period = int(period)
entity_id = request.query.get("entity")
start_day = dt_util.as_utc(datetime) - timedelta(days=period - 1)
end_day = start_day + timedelta(days=period)
hass = request.app["hass"]
def json_events():
"""Fetch events and generate JSON."""
return self.json(
_get_events(hass, self.config, start_day, end_day, entity_id)
)
return await hass.async_add_job(json_events)
def humanify(hass, events):
"""Generate a converted list of events into Entry objects.
Will try to group events if possible:
- if 2+ sensor updates in GROUP_BY_MINUTES, show last
- if home assistant stop and start happen in same minute call it restarted
"""
domain_prefixes = tuple(f"{dom}." for dom in CONTINUOUS_DOMAINS)
# Group events in batches of GROUP_BY_MINUTES
for _, g_events in groupby(
events, lambda event: event.time_fired.minute // GROUP_BY_MINUTES
):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get("entity_id")
if entity_id.startswith(domain_prefixes):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get("new_state"))
domain = to_state.domain
# Skip all but the last sensor state
if (
domain in CONTINUOUS_DOMAINS
and event != last_sensor_event[to_state.entity_id]
):
continue
# Don't show continuous sensor value changes in the logbook
if domain in CONTINUOUS_DOMAINS and to_state.attributes.get(
"unit_of_measurement"
):
continue
yield {
"when": event.time_fired,
"name": to_state.name,
"message": _entry_message_from_state(domain, to_state),
"domain": domain,
"entity_id": to_state.entity_id,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield {
"when": event.time_fired,
"name": "Home Assistant",
"message": "started",
"domain": HA_DOMAIN,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield {
"when": event.time_fired,
"name": "Home Assistant",
"message": action,
"domain": HA_DOMAIN,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield {
"when": event.time_fired,
"name": event.data.get(ATTR_NAME),
"message": event.data.get(ATTR_MESSAGE),
"domain": domain,
"entity_id": entity_id,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_ALEXA_SMART_HOME:
data = event.data
entity_id = data["request"].get("entity_id")
if entity_id:
state = hass.states.get(entity_id)
name = state.name if state else entity_id
message = "send command {}/{} for {}".format(
data["request"]["namespace"], data["request"]["name"], name
)
else:
message = "send command {}/{}".format(
data["request"]["namespace"], data["request"]["name"]
)
yield {
"when": event.time_fired,
"name": "Amazon Alexa",
"message": message,
"domain": "alexa",
"entity_id": entity_id,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_HOMEKIT_CHANGED:
data = event.data
entity_id = data.get(ATTR_ENTITY_ID)
value = data.get(ATTR_VALUE)
value_msg = f" to {value}" if value else ""
message = "send command {}{} for {}".format(
data[ATTR_SERVICE], value_msg, data[ATTR_DISPLAY_NAME]
)
yield {
"when": event.time_fired,
"name": "HomeKit",
"message": message,
"domain": DOMAIN_HOMEKIT,
"entity_id": entity_id,
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
yield {
"when": event.time_fired,
"name": event.data.get(ATTR_NAME),
"message": "has been triggered",
"domain": "automation",
"entity_id": event.data.get(ATTR_ENTITY_ID),
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
elif event.event_type == EVENT_SCRIPT_STARTED:
yield {
"when": event.time_fired,
"name": event.data.get(ATTR_NAME),
"message": "started",
"domain": "script",
"entity_id": event.data.get(ATTR_ENTITY_ID),
"context_id": event.context.id,
"context_user_id": event.context.user_id,
}
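# The generator above yields plain dicts; an illustrative item looks like:
#   {"when": <datetime>, "name": "Kitchen Light", "message": "turned on",
#    "domain": "light", "entity_id": "light.kitchen",
#    "context_id": "...", "context_user_id": None}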
def _get_related_entity_ids(session, entity_filter):
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import RETRIES, QUERY_RETRY_WAIT
from sqlalchemy.exc import SQLAlchemyError
import time
timer_start = time.perf_counter()
query = session.query(States).with_entities(States.entity_id).distinct()
for tryno in range(0, RETRIES):
try:
result = [row.entity_id for row in query if entity_filter(row.entity_id)]
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
_LOGGER.debug(
"fetching %d distinct domain/entity_id pairs took %fs",
len(result),
elapsed,
)
return result
except SQLAlchemyError as err:
_LOGGER.error("Error executing query: %s", err)
if tryno == RETRIES - 1:
raise
time.sleep(QUERY_RETRY_WAIT)
def _generate_filter_from_config(config):
from homeassistant.helpers.entityfilter import generate_filter
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude.get(CONF_ENTITIES, [])
excluded_domains = exclude.get(CONF_DOMAINS, [])
include = config.get(CONF_INCLUDE)
if include:
included_entities = include.get(CONF_ENTITIES, [])
included_domains = include.get(CONF_DOMAINS, [])
return generate_filter(
included_domains, included_entities, excluded_domains, excluded_entities
)
def _get_events(hass, config, start_day, end_day, entity_id=None):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events, States
from homeassistant.components.recorder.util import session_scope
entities_filter = _generate_filter_from_config(config)
def yield_events(query):
"""Yield Events that are not filtered away."""
for row in query.yield_per(500):
event = row.to_native()
if _keep_event(event, entities_filter):
yield event
with session_scope(hass=hass) as session:
if entity_id is not None:
entity_ids = [entity_id.lower()]
else:
entity_ids = _get_related_entity_ids(session, entities_filter)
query = (
session.query(Events)
.order_by(Events.time_fired)
.outerjoin(States, (Events.event_id == States.event_id))
.filter(Events.event_type.in_(ALL_EVENT_TYPES))
.filter((Events.time_fired > start_day) & (Events.time_fired < end_day))
.filter(
(
(States.last_updated == States.last_changed)
& States.entity_id.in_(entity_ids)
)
| (States.state_id.is_(None))
)
)
return list(humanify(hass, yield_events(query)))
def _keep_event(event, entities_filter):
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get("entity_id")
if entity_id is None:
return False
# Do not report on new entities
if event.data.get("old_state") is None:
return False
new_state = event.data.get("new_state")
# Do not report on entity removal
if not new_state:
return False
attributes = new_state.get("attributes", {})
# If last_changed != last_updated only attributes have changed
# we do not report on that yet.
last_changed = new_state.get("last_changed")
last_updated = new_state.get("last_updated")
if last_changed != last_updated:
return False
domain = split_entity_id(entity_id)[0]
# Also filter auto groups.
if domain == "group" and attributes.get("auto", False):
return False
# exclude entities which are customized hidden
hidden = attributes.get(ATTR_HIDDEN, False)
if hidden:
return False
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_AUTOMATION_TRIGGERED:
domain = "automation"
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_SCRIPT_STARTED:
domain = "script"
entity_id = event.data.get(ATTR_ENTITY_ID)
elif event.event_type == EVENT_ALEXA_SMART_HOME:
domain = "alexa"
elif event.event_type == EVENT_HOMEKIT_CHANGED:
domain = DOMAIN_HOMEKIT
if not entity_id and domain:
entity_id = f"{domain}."
return not entity_id or entities_filter(entity_id)
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain in ["device_tracker", "person"]:
if state.state == STATE_NOT_HOME:
return "is away"
return f"is at {state.state}"
if domain == "sun":
if state.state == sun.STATE_ABOVE_HORIZON:
return "has risen"
return "has set"
device_class = state.attributes.get("device_class")
if domain == "binary_sensor" and device_class:
if device_class == "battery":
if state.state == STATE_ON:
return "is low"
if state.state == STATE_OFF:
return "is normal"
if device_class == "connectivity":
if state.state == STATE_ON:
return "is connected"
if state.state == STATE_OFF:
return "is disconnected"
if device_class in ["door", "garage_door", "opening", "window"]:
if state.state == STATE_ON:
return "is opened"
if state.state == STATE_OFF:
return "is closed"
if device_class == "lock":
if state.state == STATE_ON:
return "is unlocked"
if state.state == STATE_OFF:
return "is locked"
if device_class == "plug":
if state.state == STATE_ON:
return "is plugged in"
if state.state == STATE_OFF:
return "is unplugged"
if device_class == "presence":
if state.state == STATE_ON:
return "is at home"
if state.state == STATE_OFF:
return "is away"
if device_class == "safety":
if state.state == STATE_ON:
return "is unsafe"
if state.state == STATE_OFF:
return "is safe"
if device_class in [
"cold",
"gas",
"heat",
"light",
"moisture",
"motion",
"occupancy",
"power",
"problem",
"smoke",
"sound",
"vibration",
]:
if state.state == STATE_ON:
return f"detected {device_class}"
if state.state == STATE_OFF:
return f"cleared (no {device_class} detected)"
if state.state == STATE_ON:
# Future: combine groups and its entity entries ?
return "turned on"
if state.state == STATE_OFF:
return "turned off"
return f"changed to {state.state}"
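# --- Hedged configuration sketch (illustrative, not part of this module) ---
# CONFIG_SCHEMA above accepts include/exclude filters; a configuration.yaml
# entry could look like (entity and domain names are examples only):
#
#   logbook:
#     exclude:
#       domains:
#         - sensor
#       entities:
#         - light.hallway_motion
#     include:
#       domains:
#         - light
#         - switch
#
# The "logbook.log" service registered in async_setup can be called with:
#
#   service: logbook.log
#   data:
#     name: Garden watering
#     message: started a 10 minute cycle
#     entity_id: switch.garden_valve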
| {
"content_hash": "815d774e6fe38ad0fd822d0d5a6af8b4",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 87,
"avg_line_length": 32.91639344262295,
"alnum_prop": 0.5388714577419195,
"repo_name": "Cinntax/home-assistant",
"id": "3c5e828765c98d8fca25865ea43e7dad569ed81b",
"size": "20079",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/logbook/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
} |
from __future__ import division
import tensorflow as tf
import numpy as np
from utils import *
class EncDec:
"""
    Encoder-Decoder model (for the single-sequence future-prediction problem).
    The encoder can be arbitrarily complex; its final state is passed through a
    non-linearity to initialize the decoder state.
    The decoder is always very simple: input and output share the same format,
    a 2-dimensional delta (x, y).
"""
def __init__(self, config):
self.config = config
self.batch_size = config['batch_size']
self.decoder_time_size = config['decoder_time_size']
self.enc_rnn_hid_dim = config['enc_rnn_hid_dim']
self.dec_rnn_hid_dim = config['dec_rnn_hid_dim']
assert (self.enc_rnn_hid_dim == self.dec_rnn_hid_dim) ## right now doesn't support rnns of diff size, feels it might be harder to optimize
self.encoder_input_shape = config['encoder_input_shape']
if self.encoder_input_shape is not None:
self.fc_layers = config['fc_layers']
if len(self.encoder_input_shape) == 5:
self.conv_layers = config['conv_layers']
self.keep_prob_value = config['keep_prob']
if "decoder_init_noise" in config: #stochasticity
self.decoder_init_noise = config['decoder_init_noise']
self.decoder_noise_level = config['noise_level']
else:
self.decoder_init_noise = None
self.decoder_noise_level = None
if "decoder_input_keep_prob" in config:
self.decoder_input_keep_prob = config['decoder_input_keep_prob']
else:
self.decoder_input_keep_prob = None
def build(self):
# placeholders
tf_dec_input = tf.placeholder(tf.float32, [self.batch_size, self.decoder_time_size, 2])
keep_prob = tf.placeholder(tf.float32)
if self.decoder_init_noise is not None:
self.pl_decoder_noise_level = tf.placeholder(tf.float32, [])
if self.decoder_input_keep_prob is not None:
self.pl_decoder_input_keep_prob = tf.placeholder(tf.float32, [])
else:
self.pl_decoder_input_keep_prob = tf.constant(1.)
self.teacher_forcing_stop = tf.placeholder(tf.int32)
tf_enc_input = tf.placeholder(tf.float32, self.encoder_input_shape) ## either (N, T, D), or (N, C, T, Y, X)
# init weights/bias
# [enc] pre-rnn Conv
if self.encoder_input_shape is not None and len(self.encoder_input_shape) == 5:
self.batch_size = self.encoder_input_shape[0]
self.channel_size = self.encoder_input_shape[1]
self.encoder_time_size = self.encoder_input_shape[2]
self.d1 = self.encoder_input_shape[3]
self.d2 = self.encoder_input_shape[4]
W_conv = []
b_conv = []
for layer_ind in xrange(len(self.conv_layers)):
W_conv.append(weight_variable(self.conv_layers[layer_ind]))
b_conv.append(bias_variable([self.conv_layers[layer_ind][-1]]))
SHAPE_convlast = int(np.ceil(self.d1 / (2**len(self.conv_layers))) *
np.ceil(self.d2 / (2**len(self.conv_layers))) *
self.conv_layers[-1][-1])
if self.encoder_input_shape is not None:
# [enc] pre-rnn FC
W_fc = []
b_fc = []
# first fc shape
if len(self.encoder_input_shape) == 5:
shape_zero = SHAPE_convlast
else:
shape_zero = self.encoder_input_shape[-1]
self.fc_layers.insert(0, shape_zero)
for layer_ind in xrange(len(self.fc_layers) - 1):
W_fc.append(weight_variable(
[self.fc_layers[layer_ind], self.fc_layers[layer_ind + 1]]))
b_fc.append(bias_variable([self.fc_layers[layer_ind + 1]]))
# [enc] rnn
enc_cell = tf.contrib.rnn.BasicLSTMCell(self.enc_rnn_hid_dim, state_is_tuple=True)
# [glue] 2 linear weights taking enc states to decoder
tf_glue_1 = tf.Variable(tf.eye(self.dec_rnn_hid_dim))
tf_glue_2 = tf.Variable(tf.eye(self.dec_rnn_hid_dim))
# [dec] pre-rnn
self.W_dec_inp_hid = weight_variable([2, self.dec_rnn_hid_dim])
self.b_dec_inp_hid = bias_variable([self.dec_rnn_hid_dim])
# [dec] rnn
dec_cell = tf.contrib.rnn.BasicLSTMCell(self.dec_rnn_hid_dim, state_is_tuple=True)
# [dec] post-rnn
self.W_dec_out_hid = weight_variable([self.dec_rnn_hid_dim, 2])
# self.b_dec_out_hid = bias_variable([2]) ### probably don't need output bias
## Build Graph
        # build encoder
if self.encoder_input_shape is not None:
if len(self.encoder_input_shape) == 5:
# tf_enc_input (B, C, T, Y, X)
#
tf_r_enc_input = tf.transpose( tf_enc_input, (0,2,3,4,1)) # (B,T,Y,X,C)
tf_r_enc_input = tf.reshape(tf_r_enc_input, # (B*T, Y,X,C)
(self.batch_size*self.encoder_time_size, self.d1,self.d2,self.channel_size))
# conv
h_pool_drop = tf_r_enc_input
for layer_ind in xrange(len(self.conv_layers)):
h_conv = tf.nn.relu(
conv2d(h_pool_drop, W_conv[layer_ind]) + b_conv[layer_ind])
h_pool = max_pool_2x2(h_conv)
h_pool_drop = tf.nn.dropout(h_pool, keep_prob)
h_fc_drop = tf.reshape(h_pool_drop, [-1, SHAPE_convlast])
for layer_ind in xrange(len(self.fc_layers) - 1):
h_fc = tf.nn.relu(tf.matmul(h_fc_drop, W_fc[
layer_ind]) + b_fc[layer_ind])
h_fc_drop = tf.nn.dropout(h_fc, keep_prob)
h_rnn = tf.reshape(h_fc_drop, (self.batch_size, self.encoder_time_size, self.enc_rnn_hid_dim))
elif len(self.encoder_input_shape)== 3:
# tf_enc_input (B, T, D)
self.batch_size = self.encoder_input_shape[0]
self.encoder_time_size = self.encoder_input_shape[1]
self.d = self.encoder_input_shape[2]
#
tf_r_enc_input = tf.reshape(tf_enc_input, # (B*T,D)
(self.batch_size*self.encoder_time_size, self.d))
h_fc_drop = tf_r_enc_input
for layer_ind in xrange(len(self.fc_layers) - 1):
h_fc = tf.nn.relu(tf.matmul(h_fc_drop, W_fc[
layer_ind]) + b_fc[layer_ind])
h_fc_drop = tf.nn.dropout(h_fc, keep_prob)
h_rnn = tf.reshape(h_fc_drop, (self.batch_size, self.encoder_time_size, self.enc_rnn_hid_dim))
# enc-rnn
_, enc_states = tf.contrib.rnn.static_rnn(enc_cell, tf.unstack(tf.transpose(h_rnn, [1,0,2])), dtype=tf.float32)
##
# build decoder
dec_outputs = []
if self.encoder_input_shape is not None:
s1 = tf.matmul(enc_states[0],tf_glue_1)
s2 = tf.matmul(enc_states[1],tf_glue_2)
state = (s1,s2)
else:
state = dec_cell.zero_state(self.batch_size, tf.float32)
# stochasticity
if self.decoder_init_noise is not None:
if self.decoder_init_noise == 'gaussian':
s0=gaussian_noise_layer(state[0], self.pl_decoder_noise_level)
s1=gaussian_noise_layer(state[1], self.pl_decoder_noise_level)
state = (s0,s1)
elif self.decoder_init_noise == 'dropout':
s0 = tf.nn.dropout(state[0], tf.constant(1.) - self.pl_decoder_noise_level)
s1 = tf.nn.dropout(state[1], tf.constant(1.) - self.pl_decoder_noise_level)
state = (s0, s1)
else:
raise NotImplementedError()
with tf.variable_scope("dec_rnn") as scope:
for rnn_step_ind, input_ in enumerate(tf.unstack(tf.transpose(tf_dec_input, [1,0,2]))):
if rnn_step_ind > 0:
scope.reuse_variables()
## output (BATCH, 2)
## select
tf_step_ind = tf.Variable(rnn_step_ind)
input_ = tf.where(tf.greater_equal(tf_step_ind, self.teacher_forcing_stop), output, input_)
input_ = tf.nn.dropout(input_, self.pl_decoder_input_keep_prob)
else: ## first step, always feed-in gt
pass
h_fc = tf.nn.relu(tf.matmul(input_, self.W_dec_inp_hid) + self.b_dec_inp_hid)
h_rnn = h_fc
## RNN cell
h_rnn, state = dec_cell(h_rnn, state)
# fc output
output = tf.matmul(h_rnn, self.W_dec_out_hid)
dec_outputs.append(output)
self.tf_enc_input = tf_enc_input
self.tf_dec_input = tf_dec_input
self.keep_prob = keep_prob
self.outputs = tf.transpose(dec_outputs, [1,0,2]) # -> (BATCH, TIME, 2)
def input(self, dec_input, teacher_forcing_stop = None,
enc_input=None, enc_keep_prob=None,decoder_noise_level=None, decoder_input_keep_prob=None):
# if keep_prob == None: #default, 'training'
# keep_prob = self.value_keep_prob
ret_dict = {}
# ret_dict[self.tf_enc_input] = enc_input
ret_dict[self.tf_dec_input] = dec_input
if teacher_forcing_stop == None: # default, always teacher-force
ret_dict[self.teacher_forcing_stop] = int(self.decoder_time_size)
else:
assert (teacher_forcing_stop >= 1) # has to at least feed in the first frame
ret_dict[self.teacher_forcing_stop] = int(teacher_forcing_stop)
if enc_input is not None:
ret_dict[self.tf_enc_input] = enc_input
if enc_keep_prob is None:
enc_keep_prob = self.keep_prob_value
ret_dict[self.keep_prob] = enc_keep_prob
        # The noise placeholder only exists when decoder_init_noise was configured.
        if self.decoder_init_noise is not None:
            if decoder_noise_level is None:
                ret_dict[self.pl_decoder_noise_level] = self.decoder_noise_level
            else:
                ret_dict[self.pl_decoder_noise_level] = decoder_noise_level
if self.decoder_input_keep_prob is not None:
if decoder_input_keep_prob is None:
ret_dict[self.pl_decoder_input_keep_prob] = self.decoder_input_keep_prob
else:
ret_dict[self.pl_decoder_input_keep_prob] = decoder_input_keep_prob
return ret_dict
def output(self):
return self.outputs
if __name__ == '__main__':
import yaml
optimize_loss = tf.contrib.layers.optimize_loss
f_model_config = 'config/ed-full-3d.yaml'
model_config = yaml.load(open(f_model_config, 'rb'))['model_config']
model_config['keep_prob'] = 1
## Fake Data
if model_config['encoder_input_shape'] is not None:
enc_input = np.random.rand(*model_config['encoder_input_shape'])
keep_prob = model_config['keep_prob']
dec_input = np.random.rand(model_config['batch_size'], model_config['decoder_time_size'], 2 )
dec_output = np.random.rand(model_config['batch_size'], model_config['decoder_time_size'], 2)
## Build Model
net = EncDec(model_config)
net.build()
## Build Loss
y_ = tf.placeholder(tf.float32, [model_config['batch_size'], model_config['decoder_time_size'], 2])
euclid_loss = tf.reduce_mean(tf.pow(net.output() - y_, 2))
## Build Learning
learning_rate = tf.placeholder(tf.float32, [])
global_step = tf.Variable(0)
train_step = optimize_loss(euclid_loss, global_step, learning_rate,
optimizer=lambda lr: tf.train.AdamOptimizer(lr))
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
if model_config['encoder_input_shape'] is not None:
feed_dict = net.input(dec_input,teacher_forcing_stop = None, enc_input=enc_input, enc_keep_prob=keep_prob)
else:
feed_dict = net.input(dec_input)
feed_dict[learning_rate] = 1e-4
feed_dict[y_] = dec_output
for train_step_ind in xrange(10):
l = sess.run(train_step, feed_dict=feed_dict)
print (l)
print ('.........')
# ### test teacher-forcing [PASSED]
# init_dec = np.random.rand(model_config['batch_size'], 2)
# print ('.........')
# ## following results should be the different
# for _ in xrange(5):
# dec_input = np.random.rand(model_config['batch_size'], model_config['decoder_time_size'], 2 )
# dec_input[:,0] = init_dec
# feed_dict = net.input(dec_input, teacher_forcing_stop=None)
# feed_dict[y_] = dec_output
# l = sess.run(euclid_loss, feed_dict= feed_dict)
# print (l)
# print ('.........')
# ## following results should be the same (prediction mode)
# for _ in xrange(5):
# dec_input = np.random.rand(model_config['batch_size'], model_config['decoder_time_size'], 2 )
# dec_input[:,0] = init_dec
# feed_dict = net.input(dec_input, teacher_forcing_stop=1)
# feed_dict[y_] = dec_output
# l = sess.run(euclid_loss, feed_dict= feed_dict)
# print (l)
| {
"content_hash": "26ec3d7c97f7fb2cffeafc3ebde7a533",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 146,
"avg_line_length": 48.26714801444043,
"alnum_prop": 0.5594614809274495,
"repo_name": "wangkua1/sportvu",
"id": "57a55f70d30c039ce72b8d7f68e775fe6ab22ddf",
"size": "13370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sportvu/model/encdec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "208926"
},
{
"name": "Shell",
"bytes": "6676"
}
],
"symlink_target": ""
} |
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
return pd.read_json(result['data'], orient='records'), result['metadata']
def writeToCollectionExtend(collection, symbol, df, metadata=None):
jsonStrings = {"_id":symbol, "symbol":symbol, "data":df.to_json(orient='records'), "metadata":metadata}
#bsonStrings = json_util.loads(jsonStrings)
collection.save(jsonStrings)
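# Illustrative shape of the document saved above (symbol and values are
# examples only):
#   {"_id": "AAPL", "symbol": "AAPL",
#    "data": "[{\"date\":\"2017-06-01\",\"close\":153.18}, ...]",
#    "metadata": {"last_update": "2017-06-02"}}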
def writeToCSV(csv_dir, CollectionKey, df):
if os.path.exists(csv_dir) == False:
os.makedirs(csv_dir)
filename = csv_dir + CollectionKey + '.csv'
df.to_csv(filename)
def queryStockList(root_path, database, sheet):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False: df = setStockList(df)
return df
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=0)
if df.empty == False: df = setStockList(df)
return df
return pd.DataFrame()
except Exception as e:
print("queryStockList Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeStockList(root_path, database, sheet, df, symbol = None):
CollectionKey = sheet + "_LIST"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
if symbol is not None:
df = df[df.index == symbol].reset_index()
writeToCollection(collection, df, ['symbol'])
# try:
# index_info = collection.index_information()
# print("index info", index_info)
# except Exception as e:
# print(e)
# writeToCollection(collection, df)
# #collection.create_index('symbol', unique=True, drop_dups=True)
# else:
# writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storeStockList Exception", e)
def queryStockPublishDay(root_path, database, sheet, symbol):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = readFromCollection(collection)
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename) == False: return ''
df = pd.read_csv(filename, index_col=["index"])
if df.empty == False:
publishDay = df[df['symbol'] == symbol]
if len(publishDay) == 1:
return publishDay['date'].values[0]
return ''
except Exception as e:
print("queryStockPublishDay Exception", e)
return ''
return ''
def storePublishDay(root_path, database, sheet, symbol, date):
CollectionKey = sheet + "_IPO"
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet) + config.get('Paths', 'CSV_SHARE')
filename = csv_dir + CollectionKey + '.csv'
if os.path.exists(filename):
df = pd.read_csv(filename, index_col=["index"])
publishDate = df[df['symbol'] == symbol]
if publishDate.empty:
df.loc[len(df)] = [symbol, date]
else:
df = pd.DataFrame(columns = ['symbol', 'date'])
df.index.name = 'index'
df.loc[len(df)] = [symbol, date]
writeToCSV(csv_dir, CollectionKey, df)
except Exception as e:
print("storePublishDay Exception", e)
def queryStock(root_path, database, sheet_1, sheet_2, symbol, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
stockList = getStockList(root_path, database, sheet_1)
lastUpdateTime = pd.Timestamp(stockList.loc[symbol][update_key])
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
df.set_index('date', inplace=True)
if 'index' in df:
del df['index']
return df, lastUpdateTime
if storeType == 2:
            csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
filename = csv_dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename, index_col=["date"])
return df, lastUpdateTime
except Exception as e:
print("queryStock Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeStock(root_path, database, sheet_1, sheet_2, symbol, df, update_key):
CollectionKey = sheet_1 + sheet_2 + '_DATA'
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, sheet_1)
if (stockList[stockList.index == symbol][update_key][0] != now_date):
stockList.set_value(symbol, update_key, now_date)
storeStockList(root_path, database, sheet_1, stockList, symbol)
# df.set_index('date')
# df.index = df.index.astype(str)
# df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, CollectionKey)
df = df.reset_index()
if 'date' in df: df.date = df.date.astype(str)
writeToCollectionExtend(collection, symbol, df, {})
if storeType == 2:
            csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet_1)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeStock Exception", e)
def queryNews(root_path, database, sheet, symbol):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp(getStockList(root_path, database, 'SHEET_US_DAILY').loc[symbol]['news_update'])
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df = readFromCollection(collection, queryString)
if df.empty: return pd.DataFrame(), lastUpdateTime
#df.set_index('date', inplace=True)
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + '.csv'
if os.path.exists(filename) == False: return pd.DataFrame(), lastUpdateTime
df = pd.read_csv(filename)
return df, lastUpdateTime
except Exception as e:
print("queryNews Exception", e)
return pd.DataFrame(), lastUpdateTime
return pd.DataFrame(), lastUpdateTime
def storeNews(root_path, database, sheet, symbol, df):
config = getConfig(root_path)
    storeType = int(config.get('Setting', 'StoreType'))
    now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = getStockList(root_path, database, 'SHEET_US_DAILY')
stockList.set_value(symbol, 'news_update', now_date)
storeStockList(root_path, database, "SHEET_US_DAILY", stockList.reset_index())
df = df.drop_duplicates(subset=['uri'], keep='first')
#df.set_index(['date'], inplace=True)
#df.sort_index(ascending=True, inplace=True)
try:
if storeType == 1:
collection = getCollection(database, sheet)
#df = df.reset_index()
df['symbol'] = symbol
writeToCollection(collection, df, ['symbol', 'uri'])
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeNews Exception", e)
def queryEarnings(root_path, database, sheet, date):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : date }
df, metadata = readFromCollectionExtend(collection, queryString)
return df
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + date + ".csv"
if os.path.exists(filename): return pd.read_csv(filename)
return pd.DataFrame()
except Exception as e:
print("queryEarnings Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeEarnings(root_path, database, sheet, date, df):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
try:
if storeType == 1:
collection = getCollection(database, sheet)
writeToCollectionExtend(collection, date, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, date, df)
except Exception as e:
        print("storeEarnings Exception", e)
def queryTweets(root_path, database, sheet, symbol, col):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
lastUpdateTime = pd.Timestamp('1970-01-01')
try:
if storeType == 1:
collection = getCollection(database, sheet)
queryString = { "symbol" : symbol }
df, metadata = readFromCollectionExtend(collection, queryString)
if 'last_update' in metadata:
lastUpdateTime = pd.Timestamp(metadata['last_update'])
if df.empty: return pd.DataFrame(columns=col), lastUpdateTime
df.set_index('Date')
return df, lastUpdateTime
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + symbol + ".csv"
if os.path.exists(filename) == False: return pd.DataFrame(columns=col), lastUpdateTime
df = pd.read_csv(filename)
if df.empty: return pd.DataFrame(columns=col), lastUpdateTime
if 'last_update' in df:
lastUpdateTime = pd.Timestamp(df['last_update'].iloc[0])
return df, lastUpdateTime
except Exception as e:
print("queryTweets Exception", e)
return pd.DataFrame(columns=col), lastUpdateTime
return pd.DataFrame(columns=col), lastUpdateTime
def storeTweets(root_path, database, sheet, symbol, df):
config = getConfig(root_path)
storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
df = df.drop_duplicates(keep='last')
df = df.sort_values(['Date'], ascending=[False]).reset_index(drop=True)
try:
if storeType == 1:
collection = getCollection(database, sheet)
metadata = {'last_update':now_date}
df = df.reset_index()
writeToCollectionExtend(collection, symbol, df, metadata)
if storeType == 2:
df['last_update'] = now_date
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, symbol, df)
except Exception as e:
print("storeTweets Exception", e)
def queryCorrelation(root_path, database, sheet):
config = getConfig(root_path)
    storeType = int(config.get('Setting', 'StoreType'))
try:
if storeType == 1:
collection = getCollection(database, sheet)
return readFromCollection(collection)
if storeType == 2:
dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
filename = dir + "Correlation" + ".csv"
if os.path.exists(filename): return pd.read_csv(filename, index_col=0)
return pd.DataFrame()
except Exception as e:
print("queryCorrelation Exception", e)
return pd.DataFrame()
return pd.DataFrame()
def storeCorrelation(root_path, database, sheet, df):
config = getConfig(root_path)
    storeType = int(config.get('Setting', 'StoreType'))
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
try:
if storeType == 1:
collection = getCollection(database, sheet)
writeToCollection(collection, df)
if storeType == 2:
csv_dir = root_path + "/" + config.get('Paths', database) + config.get('Paths', sheet)
writeToCSV(csv_dir, "Correlation", df)
except Exception as e:
print("storeCorrelation Exception", e) | {
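# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Typical round trip, assuming a config.ini providing Setting/StoreType and
# the Paths entries these helpers read; database/sheet keys and the symbol
# below are examples only.
#
#   root_path = "/path/to/StockRecommendSystem"
#   df, last_update = queryStock(root_path, "DB_STOCK", "SHEET_US", "_DAILY",
#                                "AAPL", "daily_update")
#   # ... merge newly fetched rows into df ...
#   storeStock(root_path, "DB_STOCK", "SHEET_US", "_DAILY", "AAPL", df,
#              "daily_update")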
"content_hash": "e5526488df753a859d082a1d2477eaab",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 133,
"avg_line_length": 36.60348583877995,
"alnum_prop": 0.5974049163740254,
"repo_name": "doncat99/StockRecommendSystem",
"id": "fca25f53bfc0875c28e83696d91f452e023317e5",
"size": "16801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/DataBase/DB_API.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "638390"
},
{
"name": "Shell",
"bytes": "54"
}
],
"symlink_target": ""
} |
from django.db import models
from mercurial import hg, ui
import git
class EMail(models.Model):
address = models.CharField(max_length=512)
user = models.ForeignKey("repokarma.User", related_name="email")
class Meta:
app_label = "repokarma"
class User(models.Model):
username = models.CharField(max_length=64, unique=True)
real_name = models.CharField(max_length=512, null=True)
class Meta:
app_label = "repokarma"
@property
def name(self):
if self.real_name:
return self.real_name
return self.username
class Repository(models.Model):
path = models.CharField(max_length=1024)
repository_type = models.CharField(max_length=32)
class Meta:
unique_together = ('path', 'repository_type')
class Commit(models.Model):
id = models.CharField(max_length=40, primary_key=True)
repository = models.ForeignKey('repokarma.Repository')
nodeid = models.IntegerField(null=True)
timestamp = models.DateTimeField()
user = models.ForeignKey(User)
lines_added = models.IntegerField()
lines_removed = models.IntegerField()
description = models.TextField()
# Required for easy access to children in git
_children = models.ManyToManyField('repokarma.Commit')
def __init__(self, *args, **kwargs):
super(Commit, self).__init__(*args, **kwargs)
self.context = None
if self.pk:
try:
self.repotype = self.repository.repository_type
except Repository.DoesNotExist:
self.repotype = None
if self.repotype == "mercurial":
self._get_hg_changeset()
elif self.repotype == 'git':
self._get_git_changeset()
def _get_hg_changeset(self):
repo = hg.repository(ui.ui(), self.repository.path)
self.context = repo.changectx(self.pk)
def _get_git_changeset(self):
repo = git.Repo(self.repository.path)
self.context = repo.commit(self.pk)
@property
def diffs(self):
if self.repotype == "git":
return self._diff_git()
elif self.repotype == "mercurial":
return self._diff_hg()
def _diff_hg(self):
for diff in self.context.diff():
yield HGDiff(diff)
def _diff_git(self):
for diff in self.context.diff(create_patch=True):
yield GitDiff(diff)
@property
def net_change(self):
return self.lines_added - self.lines_removed
@property
def files(self):
if self.repotype == "mercurial":
return self.context.files
elif self.repotype == "git":
a = set()
b = set()
for diff in self.context.diff():
try:
a.add(diff.a_blob.name)
except AttributeError:
pass
try:
b.add(diff.b_blob.name)
except AttributeError:
pass
return list(a | b)
@property
def filecount(self):
return len(self.context.files)
@property
def parents(self):
if self.repotype == "mercurial":
return [c.hex() for c in self.context.parents()]
elif self.repotype == "git":
return [c.hexsha for c in self.context.parents]
@property
def children(self):
if self.repotype == "mercurial":
return [c.hex() for c in self.context.children()]
elif self.repotype == "git":
return self._children.all().values_list('pk', flat=True)
class Meta:
app_label = "repokarma"
get_latest_by = 'timestamp'
ordering = ['-timestamp']
class HGDiff(object):
def __init__(self, data):
self.data = data
self.cmd = data.split('\n', 1)[0]
self.filename = self.cmd.rsplit(' ', 1)[-1]
class GitDiff(object):
def __init__(self, git_diff):
self.data = git_diff.diff
self.cmd = 'n/a'
filenames = set()
if git_diff.a_blob is not None:
filenames.add(git_diff.a_blob.name)
if git_diff.b_blob is not None:
filenames.add(git_diff.b_blob.name)
self.filename = '+'.join(filenames)
self.context = git_diff | {
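# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Example ORM queries against the models above (the username is made up);
# Commit.Meta orders by -timestamp, so slicing returns the most recent ones.
#
#   recent = Commit.objects.filter(user__username="alice")[:10]
#   for commit in recent:
#       print(commit.timestamp, commit.net_change, commit.files)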
"content_hash": "9cdc2e248928face54e375fd9bc5eb9d",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 68,
"avg_line_length": 29.231292517006803,
"alnum_prop": 0.577379567139865,
"repo_name": "kimvais/repokarma",
"id": "8900c05ebc2af5e388aad49efde00e66229e1522",
"size": "5441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repokarma/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7978"
},
{
"name": "Python",
"bytes": "37548"
}
],
"symlink_target": ""
} |
from django import template
# import locale
import math
from decimal import Decimal
register = template.Library()
@register.filter
def keyvalue(dict, key):
try:
return dict[key]
except KeyError:
return ''
@register.filter
def nicenumber(number):
if number and number != 0:
try:
# locale.setlocale(locale.LC_NUMERIC, 'german')
result = '{:10,.0f}'.format(number)
# result = str(round(number, 2))
except TypeError:
result = number
except ValueError:
result = number
else:
result = ''
return result
@register.filter
def nicenumber_100(number):
if number and number != 0:
try:
result = nicenumber(int(number/100)*100)
except TypeError:
result = number
else:
result = ''
return result
@register.filter
def cut(number, digits):
multiplier = Decimal(max(math.pow(10, digits), 1))
rounding_factor = multiplier/Decimal(2)
result = int((number+rounding_factor)/multiplier)*multiplier
return result
@register.filter
def absolute(number):
number = abs(number)
return number | {
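# --- Hedged template usage sketch (illustrative, not part of the original module) ---
# Assuming this module is loaded under its file name "filters":
#
#   {% load filters %}
#   {{ total_value|nicenumber }}       {# thousands separators, no decimals #}
#   {{ total_value|nicenumber_100 }}   {# additionally truncated to full hundreds #}
#   {{ delta|cut:2 }}                  {# rounded to the nearest 10^2 #}
#   {{ delta|absolute }}
#   {{ prices|keyvalue:"AAPL" }}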
"content_hash": "1a382effa7b50084adde6db754922e36",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 64,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6166242578456319,
"repo_name": "danst0/Portfolio",
"id": "36e246570d56e3b62b6f6fbcf227de6ca74146ce",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/templatetags/filters.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3417"
},
{
"name": "JavaScript",
"bytes": "286293"
},
{
"name": "Python",
"bytes": "142056"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
} |
from plmrf import *
import numpy as np
import scipy
import time
# Generating a big ring by sampling variables independently,
# then sampling based on each configuration's 'true' potential
nvars = 1000
nsamp = 1000
print("Generating data ...")
indep_data = dict()
for vindex in range(nvars):
samples = np.random.normal(size=nsamp*10)
varname = "x{0}".format(vindex)
indep_data[varname] = samples
# potentials functions are Gaussian kernels
def potential(pindex):
    # Pairwise potential between adjacent ring variables x_p and x_{p+1}.
    return (1.0/nvars) * np.exp(-np.abs(indep_data["x{0}".format(pindex)]
                                        - indep_data["x{0}".format((pindex+1) % nvars)]))
unnormalized_density = np.exp(np.sum([potential(p) for p in range(nvars)], axis=0))
relative_density = unnormalized_density / unnormalized_density.sum()
samp_indices = np.random.choice(range(nsamp*10), size=nsamp, p=relative_density)
print("Setting up potentials and variable definitions ...")
data = dict()
var_defs = []
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
samples = indep_data[varname][samp_indices]
data[varname] = samples
var_defs.append(VariableDef(varname, samples=samples, num_int_points=10))
potentials = []
tied_params = [[], []]
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
potentials.append(GaussianPotential([varname], samples=data, location=0))
potentials.append(GaussianPotential([varname, next_var], samples=data))
tied_params[0].append(len(potentials)-2)
tied_params[1].append(len(potentials)-1)
for p in potentials:
if p.bandwidth < 1e-16:
print(p)
network = LogLinearMarkovNetwork(potentials, var_defs, tied_weights=tied_params)
print("Fitting parameters ...")
start = time.time()
mple_result = network.fit(data, log=True)
end = time.time()
print("Parameter estimation completed in {0} seconds".format(end - start))
print("MPLE optimization result:")
print(mple_result)
| {
"content_hash": "60e18718c30936a81667775832fc4f55",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 122,
"avg_line_length": 32,
"alnum_prop": 0.704133064516129,
"repo_name": "dgarant/pl-markov-network",
"id": "31ae3b5d46bf317355091ceb8d50c93da24b749a",
"size": "2076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/large_non_gaussian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27303"
}
],
"symlink_target": ""
} |
import importlib
import ckan.plugins as p
from functools import partial
class UserDatasetsPlugin(p.SingletonPlugin):
    """UserDatasetsPlugin
This plugin replaces dataset and resource authentication calls to allow
users with the 'Member' role to create datasets, and edit/delete their
own datasets (but not others).
"""
p.implements(p.IAuthFunctions)
p.implements(p.IActions)
p.implements(p.IConfigurable)
def configure(self, config):
"""Implementation of IConfigurable.configure"""
self.default_auth_module = config.get('userdatasets.default_auth_module', 'ckan.logic.auth')
self.default_action_module = config.get('userdatasets.default_action_module', 'ckan.logic.action')
def get_auth_functions(self):
"""Implementation of IAuthFunctions.get_auth_functions"""
# We override all of create/update/delete for packages, resources and resource views. Our implementation
# takes the default auth function as first parameter, which we apply here as a partial.
auth_functions = {}
for action in ['create', 'update', 'delete']:
default_module = importlib.import_module(self.default_auth_module + '.' + action)
uds_module = importlib.import_module('ckanext.userdatasets.logic.auth.' + action)
for atype in ['package', 'resource', 'resource_view']:
fn_name = atype + '_' + action
if hasattr(default_module, fn_name) and hasattr(uds_module, fn_name):
default_fn = getattr(default_module, fn_name)
uds_fn = getattr(uds_module, fn_name)
auth_functions[fn_name] = partial(uds_fn, default_fn)
return auth_functions
def get_actions(self):
"""Implementation of IActions.get_actions"""
actions = {}
# Override selected actions. Our implementation takes the default auth functions as first parameter, which we
# apply here as a partial
to_override = [
# ('create', ['package_create']),
# ('update', ['package_update']),
('get', ['organization_list_for_user'])
]
for override in to_override:
default_module = importlib.import_module(self.default_action_module + '.' + override[0])
uds_module = importlib.import_module('ckanext.userdatasets.logic.action.' + override[0])
for fn_name in override[1]:
default_fn = getattr(default_module, fn_name)
uds_fn = getattr(uds_module, fn_name)
#actions[fn_name] = partial(uds_fn, default_fn)
actions[fn_name] = uds_fn
return actions
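# Illustration only (not part of the plugin): the functools.partial pattern used
# in get_auth_functions binds the stock implementation as the first argument of
# the override, so the override can delegate to the default decision and only
# relax it where needed. The function names below are made up for the sketch.
def _example_default_auth(context, data_dict=None):
    return {'success': False, 'msg': 'default says no'}
def _example_override_auth(default_auth, context, data_dict=None):
    # Start from the default decision, then grant a special case.
    result = default_auth(context, data_dict)
    if not result['success'] and context.get('user') == 'member':
        return {'success': True}
    return result
_example_wrapped = partial(_example_override_auth, _example_default_auth)
# _example_wrapped(context, data_dict) now has the two-argument signature CKAN
# expects of an auth function, with the default available inside the override.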
| {
"content_hash": "b38d44a0307c973bb583dcad4af80c73",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 117,
"avg_line_length": 44.36065573770492,
"alnum_prop": 0.6304508499630451,
"repo_name": "CI-WATER/portal",
"id": "604ba029f492e1076f4b95edd4c35dd98325bd2d",
"size": "2706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ckanext-userdatasets/ckanext/userdatasets/plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "270834"
},
{
"name": "JavaScript",
"bytes": "80762"
},
{
"name": "Python",
"bytes": "345089"
}
],
"symlink_target": ""
} |
'''
@author Luke Campbell <LCampbell at ASAScience dot com>
@date Tue Feb 12 09:54:27 EST 2013
@file ion/processes/data/transforms/transform_prime.py
'''
from ion.core.process.transform import TransformDataProcess
from coverage_model import ParameterDictionary
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceProcessClient
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from coverage_model import get_value_class
from coverage_model.parameter_types import ParameterFunctionType
from pyon.util.memoize import memoize_lru
from pyon.util.log import log
from pyon.core.exception import NotFound
from pyon.ion.event import EventSubscriber
from ion.util.stored_values import StoredValueManager
from pyon.public import OT
from gevent.event import Event
from gevent.queue import Queue
class TransformPrime(TransformDataProcess):
binding=['output']
'''
Transforms which have an incoming stream and an outgoing stream.
Parameters:
process.stream_id Outgoing stream identifier.
process.exchange_point Route's exchange point.
process.routing_key Route's routing key.
process.queue_name Name of the queue to listen on.
process.routes streams,actor for each route {(stream_input_id, stream_output_id):actor}
Either the stream_id or both the exchange_point and routing_key need to be provided.
'''
def on_start(self):
TransformDataProcess.on_start(self)
self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
self.stored_values = StoredValueManager(self.container)
self.input_data_product_ids = self.CFG.get_safe('process.input_products', [])
self.output_data_product_ids = self.CFG.get_safe('process.output_products', [])
self.lookup_docs = self.CFG.get_safe('process.lookup_docs',[])
self.new_lookups = Queue()
self.lookup_monitor = EventSubscriber(event_type=OT.ExternalReferencesUpdatedEvent,callback=self._add_lookups, auto_delete=True)
self.lookup_monitor.start()
def on_quit(self):
self.lookup_monitor.stop()
TransformDataProcess.on_quit(self)
def _add_lookups(self, event, *args, **kwargs):
if event.origin in self.input_data_product_ids + self.output_data_product_ids:
if isinstance(event.reference_keys, list):
self.new_lookups.put(event.reference_keys)
@memoize_lru(100)
def read_stream_def(self,stream_id):
return self.pubsub_management.read_stream_definition(stream_id=stream_id)
def recv_packet(self, msg, stream_route, stream_id):
process_routes = self.CFG.get_safe('process.routes', {})
for stream_in_id,routes in process_routes.iteritems():
if stream_id == stream_in_id:
for stream_out_id, actor in routes.iteritems():
if actor is None:
rdt_out = self._execute_transform(msg, (stream_in_id, stream_out_id))
self.publish(rdt_out.to_granule(), stream_out_id)
else:
outgoing = self._execute_actor(msg, actor, (stream_in_id, stream_out_id))
self.publish(outgoing, stream_out_id)
def publish(self, msg, stream_out_id):
publisher = getattr(self, stream_out_id)
publisher.publish(msg)
def _load_actor(self, actor):
'''
Returns callable execute method if it exists, otherwise it raises a BadRequest
'''
try:
module = __import__(actor['module'], fromlist=[''])
except ImportError:
log.exception('Actor could not be loaded')
raise
try:
cls = getattr(module, actor['class'])
except AttributeError:
log.exception('Module %s does not have class %s', repr(module), actor['class'])
raise
try:
execute = getattr(cls,'execute')
except AttributeError:
log.exception('Actor class does not contain execute method')
raise
return execute
def _execute_actor(self, msg, actor, streams):
stream_in_id,stream_out_id = streams
stream_def_out = self.read_stream_def(stream_out_id)
params = self.CFG.get_safe('process.params', {})
config = self.CFG.get_safe('process')
#do the stuff with the actor
params['stream_def'] = stream_def_out._id
executor = self._load_actor(actor)
try:
rdt_out = executor(msg, None, config, params, None)
except:
log.exception('Error running actor for %s', self.id)
raise
return rdt_out
def _merge_pdicts(self, pdict1, pdict2):
incoming_pdict = ParameterDictionary.load(pdict1)
outgoing_pdict = ParameterDictionary.load(pdict2)
merged_pdict = ParameterDictionary()
for k,v in incoming_pdict.iteritems():
ordinal, v = v
if k not in merged_pdict:
merged_pdict.add_context(v)
for k,v in outgoing_pdict.iteritems():
ordinal, v = v
if k not in merged_pdict:
merged_pdict.add_context(v)
return merged_pdict
def _merge_rdt(self, stream_def_in, stream_def_out):
incoming_pdict_dump = stream_def_in.parameter_dictionary
outgoing_pdict_dump = stream_def_out.parameter_dictionary
merged_pdict = self._merge_pdicts(incoming_pdict_dump, outgoing_pdict_dump)
rdt_temp = RecordDictionaryTool(param_dictionary=merged_pdict)
return rdt_temp
def _get_lookup_value(self, lookup_value):
if not self.new_lookups.empty():
new_values = self.new_lookups.get()
self.lookup_docs = new_values + self.lookup_docs
lookup_value_document_keys = self.lookup_docs
for key in lookup_value_document_keys:
try:
document = self.stored_values.read_value(key)
if lookup_value in document:
return document[lookup_value]
except NotFound:
log.warning('Specified lookup document does not exist')
return None
def _execute_transform(self, msg, streams):
stream_in_id,stream_out_id = streams
stream_def_in = self.read_stream_def(stream_in_id)
stream_def_out = self.read_stream_def(stream_out_id)
rdt_temp = self._merge_rdt(stream_def_in, stream_def_out)
rdt_in = RecordDictionaryTool.load_from_granule(msg)
for field in rdt_temp.fields:
if not isinstance(rdt_temp._pdict.get_context(field).param_type, ParameterFunctionType):
try:
rdt_temp[field] = rdt_in[field]
except KeyError:
pass
rdt_temp.fetch_lookup_values()
for lookup_field in rdt_temp.lookup_values():
s = lookup_field
stored_value = self._get_lookup_value(rdt_temp.context(s).lookup_value)
if stored_value is not None:
rdt_temp[s] = stored_value
for field in rdt_temp.fields:
if isinstance(rdt_temp._pdict.get_context(field).param_type, ParameterFunctionType):
rdt_temp[field] = rdt_temp[field]
rdt_out = RecordDictionaryTool(stream_definition_id=stream_def_out._id)
for field in rdt_out.fields:
rdt_out[field] = rdt_temp[field]
return rdt_out
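# Illustration only: a hypothetical shape for the 'process' configuration this
# transform reads through self.CFG. The stream ids and the actor module/class
# are made-up placeholders, not real resources.
_EXAMPLE_PROCESS_CFG = {
    'queue_name': 'transform_prime_queue',
    'routes': {
        # None -> run the parameter-function transform (_execute_transform)
        ('stream_in_id', 'stream_out_id_a'): None,
        # actor dict -> load the module/class and call its execute() (_execute_actor)
        ('stream_in_id', 'stream_out_id_b'): {
            'module': 'ion.example.some_actor_module',
            'class': 'SomeActor',
        },
    },
    'params': {},
    'lookup_docs': [],
    'input_products': [],
    'output_products': [],
}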
| {
"content_hash": "ee1ed14385e8d4421cdef54b4b2b1800",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 136,
"avg_line_length": 40.12698412698413,
"alnum_prop": 0.6313291139240507,
"repo_name": "ooici/coi-services",
"id": "aa20048130bec22c1b05b28d7a678c9ba61280d0",
"size": "7606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ion/processes/data/transforms/transform_prime.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "403012"
},
{
"name": "C++",
"bytes": "251803"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Erlang",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "11627"
},
{
"name": "Objective-C",
"bytes": "8918"
},
{
"name": "Python",
"bytes": "7964384"
},
{
"name": "Shell",
"bytes": "9221"
},
{
"name": "nesC",
"bytes": "57712131"
}
],
"symlink_target": ""
} |
import httplib
from flask import request, session, make_response
from flask_restful_swagger import swagger
from flask.ext.restful import Resource
from CairisHTTPError import ObjectNotFoundHTTPError
from data.AssetDAO import AssetDAO
from tools.JsonConverter import json_serialize
from tools.MessageDefinitions import AssetMessage, AssetEnvironmentPropertiesMessage, ValueTypeMessage
from tools.ModelDefinitions import AssetModel as SwaggerAssetModel, AssetEnvironmentPropertiesModel, ValueTypeModel
from tools.SessionValidator import get_session_id, get_model_generator
class AssetsAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all assets',
responseClass=SwaggerAssetModel.__name__,
nickname='assets-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
},
{
"name": "constraint_id",
"description": "An ID used to filter the assets",
"required": False,
"default": -1,
"allowMultiple": False,
"dataType": int.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self):
constraint_id = request.args.get('constraint_id', -1)
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
assets = dao.get_assets(constraint_id=constraint_id)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new asset',
nickname='asset-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new asset to be added",
"required": True,
"allowMultiple": False,
"type": AssetMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset = dao.from_json(request)
new_id = dao.add_asset(asset)
dao.close()
resp_dict = {'asset_id': new_id}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.contenttype = 'application/json'
return resp
class AssetByEnvironmentNamesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all the asset names associated with a specific environment',
responseClass=SwaggerAssetModel.__name__,
nickname='assets-by-environment-names-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, environment):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
assets = dao.get_asset_names(environment=environment)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
class AssetByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get an asset by name',
responseClass=SwaggerAssetModel.__name__,
nickname='asset-by-name-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
found_asset = dao.get_asset_by_name(name)
dao.close()
resp = make_response(json_serialize(found_asset, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
# region Swagger Doc
@swagger.operation(
notes='Updates an existing asset',
nickname='asset-put',
parameters=[
{
"name": "body",
"description": "The session ID and the serialized version of the asset to be updated",
"required": True,
"allowMultiple": False,
"type": AssetMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided asset name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset = dao.from_json(request)
dao.update_asset(asset, name=name)
dao.close()
resp_dict = {'message': 'Update successful'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.contenttype = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing asset',
nickname='asset-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided asset name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = request.args.get('session_id', None)
dao = AssetDAO(session_id)
dao.delete_asset(name=name)
dao.close()
resp_dict = {'message': 'Asset successfully deleted'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.contenttype = 'application/json'
return resp
class AssetByIdAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get an asset by ID',
responseClass=SwaggerAssetModel.__name__,
nickname='asset-by-id-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, id):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset = dao.get_asset_by_id(id)
dao.close()
if asset is None:
raise ObjectNotFoundHTTPError('The asset')
resp = make_response(json_serialize(asset, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
class AssetNamesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get a list of assets',
responseClass=str.__name__,
responseContainer="List",
nickname='asset-names-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self):
session_id = request.args.get('session_id', None)
dao = AssetDAO(session_id)
assets_names = dao.get_asset_names()
dao.close()
resp = make_response(json_serialize(assets_names, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
class AssetModelAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get the asset model for a specific environment',
nickname='asset-model-get',
parameters=[
{
"name": "environment",
"description": "The environment to be used for the asset model",
"required": True,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
},
{
"name": "with_concerns",
"description": "Defines if concerns should be included in the model",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"enum": ['0','1'],
"paramType": "query"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, environment):
session_id = get_session_id(session, request)
with_concerns = request.args.get('with_concerns', True)
if with_concerns == '0' or with_concerns == 0:
with_concerns = False
model_generator = get_model_generator()
dao = AssetDAO(session_id)
dot_code = dao.get_asset_model(environment, with_concerns=with_concerns)
dao.close()
if not isinstance(dot_code, str):
raise ObjectNotFoundHTTPError('The model')
resp = make_response(model_generator.generate(dot_code), httplib.OK)
accept_header = request.headers.get('Accept', 'image/svg+xml')
if accept_header.find('text/plain') > -1:
resp.headers['Content-type'] = 'text/plain'
else:
resp.headers['Content-type'] = 'image/svg+xml'
return resp
class AssetEnvironmentPropertiesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get the environment properties for a specific asset',
nickname='asset-envprops-by-name-get',
responseClass=AssetEnvironmentPropertiesModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, asset_name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset_props = dao.get_asset_props(name=asset_name)
dao.close()
resp = make_response(json_serialize(asset_props, session_id=session_id))
resp.contenttype = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Updates the environment properties for a specific asset',
nickname='asset-envprops-by-name-put',
parameters=[
{
"name": "body",
"required": True,
"allowMultiple": False,
"dataType": AssetEnvironmentPropertiesMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def put(self, asset_name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset_prop = dao.from_json(request, to_props=True)
dao.update_asset_properties(asset_prop, name=asset_name)
dao.close()
resp_dict = {'message': 'The asset properties were successfully updated.'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.contenttype = 'application/json'
return resp
class AssetTypesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all asset types',
nickname='assets-types-get',
responseClass=ValueTypeModel.__name__,
responseContainer='List',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AssetDAO(session_id)
assets = dao.get_asset_types(environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id), httplib.OK)
resp.contenttype = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new asset type',
nickname='asset-type-by-name-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new asset type to be added",
"required": True,
"allowMultiple": False,
"type": ValueTypeMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AssetDAO(session_id)
new_value_type = dao.type_from_json(request)
asset_type_id = dao.add_asset_type(new_value_type, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Asset type successfully added', 'asset_type_id': asset_type_id}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.contenttype = 'application/json'
return resp
class AssetTypeByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
        notes='Get an asset type by name',
nickname='asset-type-by-name-get',
responseClass=ValueTypeModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AssetDAO(session_id)
asset_type = dao.get_asset_type_by_name(name=name, environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(asset_type, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Docs
@swagger.operation(
        notes='Updates an asset type',
nickname='asset-type-by-name-put',
parameters=[
{
'name': 'body',
"description": "",
"required": True,
"allowMultiple": False,
'type': ValueTypeMessage.__name__,
'paramType': 'body'
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'The provided file is not a valid XML file'
},
{
'code': httplib.BAD_REQUEST,
'message': '''Some parameters are missing. Be sure 'asset' is defined.'''
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AssetDAO(session_id)
asset_type = dao.type_from_json(request)
dao.update_asset_type(asset_type, name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Asset type successfully updated'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing asset type',
nickname='asset-type-by-name-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided asset name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = get_session_id(session, request)
environment_name = request.args.get('environment', '')
dao = AssetDAO(session_id)
dao.delete_asset_type(name=name, environment_name=environment_name)
dao.close()
resp_dict = {'message': 'Asset type successfully deleted'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
class AssetValuesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all asset values',
nickname='assets-values-get',
responseClass=ValueTypeModel.__name__,
responseContainer='List',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, environment_name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
assets = dao.get_asset_values(environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(assets, session_id=session_id), httplib.OK)
resp.contenttype = 'application/json'
return resp
class AssetValueByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
        notes='Get an asset value by name',
nickname='asset-value-by-name-get',
responseClass=ValueTypeModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name, environment_name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset_value = dao.get_asset_value_by_name(name=name, environment_name=environment_name)
dao.close()
resp = make_response(json_serialize(asset_value, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
# region Swagger Docs
@swagger.operation(
        notes='Updates an asset value',
nickname='asset-value-by-name-put',
parameters=[
{
'name': 'body',
"description": "",
"required": True,
"allowMultiple": False,
'type': ValueTypeMessage.__name__,
'paramType': 'body'
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'The provided file is not a valid XML file'
},
{
'code': httplib.BAD_REQUEST,
'message': '''Some parameters are missing. Be sure 'asset' is defined.'''
}
]
)
# endregion
def put(self, name, environment_name):
session_id = get_session_id(session, request)
dao = AssetDAO(session_id)
asset_value = dao.type_from_json(request)
dao.update_asset_value(asset_value, name=name, environment_name=environment_name)
dao.close()
        resp_dict = {'message': 'Asset value successfully updated'}
resp = make_response(json_serialize(resp_dict), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
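# Illustration only: a minimal client call against the asset collection served by
# AssetsAPI above. The route prefix, port and session id are assumptions; real
# deployments register these resources under their own URL map.
def _example_list_assets(base_url='http://localhost:7071/api', session_id='test'):
    import requests
    response = requests.get(base_url + '/assets', params={'session_id': session_id})
    return response.json()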
| {
"content_hash": "3c690ee493d585d5034d9d475eefdc4c",
"timestamp": "",
"source": "github",
"line_count": 840,
"max_line_length": 115,
"avg_line_length": 32.61071428571429,
"alnum_prop": 0.5129047566896653,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "257569d3b353c03f4e994c0e80a8f98cdaaad1fa",
"size": "27393",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/controllers/AssetController.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
import threading
import time
import core.session
''' Periodically checks if sessions are alive '''
class Extant(object):
def __init__(self, shell):
self.shell = shell
self.check_alive_timer = None
self.check()
def check(self):
if self.check_alive_timer is not None:
self.check_alive_timer.cancel()
self.check_alive_timer = threading.Timer(1.0, self.check)
self.check_alive_timer.daemon = True
self.check_alive_timer.start()
now = time.time()
max_delta = 10
for skey, session in self.shell.sessions.items():
delta = now - session.last_active
#delta = datetime.timedelta(seconds=int(delta))
if session.status == core.session.Session.ALIVE:
if delta > max_delta:
self.shell.play_sound('TIMEOUT')
session.set_dead()
else:
if delta < max_delta:
self.shell.play_sound('RECONNECT')
session.set_reconnect()
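# Illustration only: the self-re-arming daemon Timer pattern used by Extant.check(),
# reduced to a standalone sketch.
def _example_heartbeat(work, interval=1.0):
    import threading
    def tick():
        # Re-arm first so the next check is scheduled even if work() is slow.
        timer = threading.Timer(interval, tick)
        timer.daemon = True
        timer.start()
        work()
    tick()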
| {
"content_hash": "836d8ba8669b021ccd43597c8a2f60c4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 65,
"avg_line_length": 28.864864864864863,
"alnum_prop": 0.5608614232209738,
"repo_name": "zerosum0x0/koadic",
"id": "e0400fec3fd3f9f4a985532c45a5a4cce096b9ee",
"size": "1068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/extant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1153"
},
{
"name": "C",
"bytes": "152727"
},
{
"name": "C#",
"bytes": "4074"
},
{
"name": "C++",
"bytes": "17602"
},
{
"name": "Dockerfile",
"bytes": "192"
},
{
"name": "JavaScript",
"bytes": "99522"
},
{
"name": "Python",
"bytes": "2958758"
},
{
"name": "VBA",
"bytes": "1700"
},
{
"name": "VBScript",
"bytes": "14154"
},
{
"name": "XSLT",
"bytes": "295"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from datebook import data_views
urlpatterns = patterns('',
url(r'^ajax/event/create/', data_views.event_create, name="event_create"),
url(r'^ajax/event/update/', data_views.event_update, name="event_update"),
url(r'^ajax/event/delete/', data_views.event_delete, name="event_delete"),
url(r'^ajax/event/form/', data_views.event_form_html, name="event_form_html"),
url(r'^ajax/event/data/', data_views.event_data, name="event_data"),
url(r'^ajax/series/form/', data_views.series_form_html, name="series_form_html"),
url(r'^ajax/datebook/(?P<datebook_id>\d+)/events/', data_views.datebook_events, name="datebook_events"),
)
| {
"content_hash": "9775626ba67c65c1c7366bef50008374",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 105,
"avg_line_length": 48.733333333333334,
"alnum_prop": 0.667578659370725,
"repo_name": "aarontropy/django-datebook",
"id": "9017d5bc606d451da7907dfdb7cd32c5644db7b6",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datebook/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "198382"
},
{
"name": "Python",
"bytes": "54727"
}
],
"symlink_target": ""
} |
import _jpype
import datetime
import decimal
import sys
import _jpype
from . import _jclass
from . import _jcustomizer
# Copies of all private base types for reference
_JClass = _jpype._JClass
_JObject = _jpype._JObject
_JException = _jpype._JException
_JNumberLong = _jpype._JNumberLong
_JNumberFloat = _jpype._JNumberFloat
_JComparable = _jpype._JComparable
_JChar = _jpype._JChar
_JBoolean = _jpype._JBoolean
_JArray = _jpype._JArray
_JBuffer = _jpype._JBuffer
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Protocol, runtime_checkable
from typing import Sequence, Mapping, Set # lgtm [py/unused-import]
from typing import SupportsFloat, Callable # lgtm [py/unused-import]
@runtime_checkable
class SupportsIndex(Protocol):
def __index__(self) -> int: ...
else:
# 3.8 onward
from typing import Protocol, runtime_checkable
from typing import SupportsIndex, SupportsFloat # lgtm [py/unused-import]
from typing import Sequence, Mapping, Set, Callable # lgtm [py/unused-import]
# Types we need
@runtime_checkable
class SupportsPath(Protocol):
def __fspath__(self) -> str: ...
@_jcustomizer.JConversion("java.nio.file.Path", instanceof=SupportsPath)
def _JPathConvert(jcls, obj):
Paths = _jpype.JClass("java.nio.file.Paths")
return Paths.get(obj.__fspath__())
@_jcustomizer.JConversion("java.io.File", instanceof=SupportsPath)
def _JFileConvert(jcls, obj):
return jcls(obj.__fspath__())
# To be added in 1.1.x
@_jcustomizer.JConversion("java.lang.Iterable", instanceof=Sequence, excludes=str)
@_jcustomizer.JConversion("java.util.Collection", instanceof=Sequence, excludes=str)
def _JSequenceConvert(jcls, obj):
return _jclass.JClass('java.util.Arrays').asList(obj)
@_jcustomizer.JConversion("java.lang.Iterable", instanceof=Set)
@_jcustomizer.JConversion("java.util.Collection", instanceof=Set)
def _JSetConvert(jcls, obj):
# set does not satisfy PySequence_Check and collection is too broad as it
# would let dict be converted, so we are going to have to convert twice
# for now
return _jclass.JClass('java.util.Arrays').asList(list(obj))
@_jcustomizer.JConversion("java.util.Map", instanceof=Mapping)
def _JMapConvert(jcls, obj):
hm = _jclass.JClass('java.util.HashMap')()
for p, v in obj.items():
hm[p] = v
return hm
# Converters start here
@_jcustomizer.JConversion("java.time.Instant", exact=datetime.datetime)
def _JInstantConversion(jcls, obj):
utc = obj.replace(tzinfo=datetime.timezone.utc).timestamp()
sec = int(utc)
nsec = int((utc - sec) * 1e9)
return jcls.ofEpochSecond(sec, nsec)
if sys.version_info < (3, 6): # pragma: no cover
import pathlib
@_jcustomizer.JConversion("java.nio.file.Path", instanceof=pathlib.PurePath)
def _JPathConvert(jcls, obj):
Paths = _jpype.JClass("java.nio.file.Paths")
return Paths.get(str(obj))
@_jcustomizer.JConversion("java.io.File", instanceof=pathlib.PurePath)
def _JFileConvert(jcls, obj):
return jcls(str(obj))
# Types needed for SQL
@_jcustomizer.JImplementationFor('java.sql.Date')
class _JSQLDate:
def _py(self):
return datetime.date(self.getYear() + 1900, self.getMonth() + 1, self.getDate())
@_jcustomizer.JImplementationFor('java.sql.Time')
class _JSQLTime:
def _py(self):
return datetime.time(self.getHours(), self.getMinutes(), self.getSeconds())
@_jcustomizer.JImplementationFor('java.sql.Timestamp')
class _JDate:
def _py(self):
return datetime.datetime(self.getYear() + 1900, self.getMonth() + 1, self.getDate(),
self.getHours(), self.getMinutes(), self.getSeconds(), self.getNanos() // 1000)
@_jcustomizer.JImplementationFor('java.math.BigDecimal')
class _JBigDecimal:
def _py(self):
return decimal.Decimal(str(self))
@_jcustomizer.JConversion("java.sql.Time", instanceof=datetime.time)
def _toTime(jcls, x):
return jcls(x.hour, x.minute, x.second)
@_jcustomizer.JConversion("java.sql.Date", instanceof=datetime.date)
def _toDate(jcls, x):
return jcls(x.year - 1900, x.month - 1, x.day)
@_jcustomizer.JConversion("java.sql.Timestamp", instanceof=datetime.datetime)
def _toTimestamp(jcls, x):
return jcls(x.year - 1900, x.month - 1, x.day, x.hour, x.minute, x.second, x.microsecond * 1000)
@_jcustomizer.JConversion("java.math.BigDecimal", instanceof=decimal.Decimal)
def _toBigDecimal(jcls, x):
return jcls(str(x))
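# Illustration only (requires a started JVM): once the conversions above are
# registered, plain Python values can be passed where the matching Java types
# are expected.
def _example_path_conversion():
    import pathlib
    import jpype
    if not jpype.isJVMStarted():
        jpype.startJVM()
    Files = jpype.JClass('java.nio.file.Files')
    # pathlib.Path satisfies SupportsPath, so it converts to java.nio.file.Path.
    return Files.exists(pathlib.Path('.'))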
| {
"content_hash": "444689c3bf14673fbc3bcdceecf885f1",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 112,
"avg_line_length": 30.486486486486488,
"alnum_prop": 0.7005762411347518,
"repo_name": "originell/jpype",
"id": "fc77854d01c3814421a5efb9e35874911757a125",
"size": "5273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jpype/protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2008"
},
{
"name": "C",
"bytes": "10411"
},
{
"name": "C++",
"bytes": "463150"
},
{
"name": "Java",
"bytes": "52542"
},
{
"name": "Objective-C",
"bytes": "369"
},
{
"name": "PowerShell",
"bytes": "1685"
},
{
"name": "Python",
"bytes": "219955"
},
{
"name": "Shell",
"bytes": "1968"
},
{
"name": "XSLT",
"bytes": "3460"
}
],
"symlink_target": ""
} |
"""
Bonus Tutorial: Using generators to return result bodies
Instead of returning a complete result string, you can use the yield
statement to return one result part after another. This may be convenient
in situations where using a template package like CherryPy or Cheetah
would be overkill, and messy string concatenation too uncool. ;-)
"""
import cherrypy
class GeneratorDemo:
def header(self):
return "<html><body><h2>Generators rule!</h2>"
def footer(self):
return "</body></html>"
def index(self):
# Let's make up a list of users for presentation purposes
users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']
# Every yield line adds one part to the total result body.
yield self.header()
yield "<h3>List of users:</h3>"
for user in users:
yield "%s<br/>" % user
yield self.footer()
index.exposed = True
cherrypy.tree.mount(GeneratorDemo())
if __name__ == '__main__':
import os.path
thisdir = os.path.dirname(__file__)
cherrypy.quickstart(config=os.path.join(thisdir, 'tutorial.conf'))
| {
"content_hash": "8f13894b24ec64409fb75db2b2d3d7ba",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 28.26829268292683,
"alnum_prop": 0.635030198446937,
"repo_name": "VHAINNOVATIONS/DmD",
"id": "c5be531f7bcbccd4ce1b29e901b32f42ce952d9e",
"size": "1159",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scrubber/MIST_2_0_4/src/CherryPy-3.1.2/cherrypy/tutorial/tut08_generators_and_yield.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "258262"
},
{
"name": "HTML",
"bytes": "3057541"
},
{
"name": "Java",
"bytes": "363296"
},
{
"name": "JavaScript",
"bytes": "8682388"
},
{
"name": "Perl",
"bytes": "294110"
},
{
"name": "Perl6",
"bytes": "14166"
},
{
"name": "Prolog",
"bytes": "782419"
},
{
"name": "Python",
"bytes": "3569206"
},
{
"name": "Shell",
"bytes": "6422"
},
{
"name": "XS",
"bytes": "120883"
}
],
"symlink_target": ""
} |
from typing import Optional
from appium.options.common.supports_capabilities import SupportsCapabilities
ENFORCE_FRESH_SIMULATOR_CREATION = 'enforceFreshSimulatorCreation'
class EnforceFreshSimulatorCreationOption(SupportsCapabilities):
@property
def enforce_fresh_simulator_creation(self) -> Optional[bool]:
"""
Whether to create a new simulator for each new test session.
"""
return self.get_capability(ENFORCE_FRESH_SIMULATOR_CREATION)
@enforce_fresh_simulator_creation.setter
def enforce_fresh_simulator_creation(self, value: bool) -> None:
"""
Creates a new simulator in session creation and deletes it in session deletion.
Defaults to false.
"""
self.set_capability(ENFORCE_FRESH_SIMULATOR_CREATION, value)
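# Illustration only: this mixin is surfaced through the public XCUITest options
# class, so callers typically write (import path assumed from the package layout):
#
#   from appium.options.ios import XCUITestOptions
#   options = XCUITestOptions()
#   options.enforce_fresh_simulator_creation = True
#   # driver = webdriver.Remote(appium_server_url, options=options)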
| {
"content_hash": "0776b4a1912d953bcb4aff1eee277ec6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 87,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.7245657568238213,
"repo_name": "appium/python-client",
"id": "cea87eb05e57a2bf0186e50d1ea96e5777235cc2",
"size": "1594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appium/options/ios/xcuitest/simulator/enforce_fresh_simulator_creation_option.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "835"
},
{
"name": "Python",
"bytes": "801497"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
} |
"""HandOut db model filelist.
File list.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from . import Base
import re
import mimetypes
from datetime import datetime
from sqlalchemy import Column, func
from sqlalchemy.dialects.mysql import INTEGER, CHAR, VARCHAR, TEXT, TIMESTAMP
class FileList(Base):
__tablename__ = 'filelist'
key = Column(CHAR(40, collation='utf8_unicode_ci'), primary_key=True)
filename = Column(TEXT(charset='utf8'), nullable=False)
file_type = Column(CHAR(10, collation='utf8_unicode_ci'))
present_id = Column(INTEGER, nullable=False)
created = Column(TIMESTAMP, default=datetime.utcnow)
def __init__(self, **kwargs):
self.key = kwargs['key']
self.filename = kwargs['filename']
self.file_type = kwargs['file_type']
self.present_id = kwargs['present_id']
def __repr__(self):
return 'FileList(%s ,%s)' % \
(self.key,self.filename)
@classmethod
def by_key(cls, key, sql_session):
q = sql_session.query(cls)
return q.filter(cls.key == key)
@classmethod
def by_present_id(cls, present_id, sql_session):
q = sql_session.query(cls)
return q.filter(cls.present_id == present_id)
def to_dict(self):
return {
'key' : self.key,
'filename' : self.filename,
# 'filetype' : 'file',
}
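# Illustration only: collecting one presentation's files as JSON-ready dicts,
# given an active SQLAlchemy session bound to this model's metadata.
def _example_files_for_presentation(sql_session, present_id):
    return [row.to_dict() for row in FileList.by_present_id(present_id, sql_session)]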
| {
"content_hash": "369d736b11ecb554c563b3fa330d053d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 27.14814814814815,
"alnum_prop": 0.6275579809004093,
"repo_name": "chatea/HandOut",
"id": "a9c0597002327ae568403197854489055e41f40f",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db/filelist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5440"
},
{
"name": "HTML",
"bytes": "47935"
},
{
"name": "Java",
"bytes": "1555957"
},
{
"name": "JavaScript",
"bytes": "1776"
},
{
"name": "Python",
"bytes": "26125"
}
],
"symlink_target": ""
} |
"""engine.SCons.Options.EnumOption
This file defines the option type for SCons allowing only specified
input-values.
Usage example:
opts = Options()
opts.Add(EnumOption('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=2))
...
if env['debug'] == 'full':
...
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__all__ = ('EnumOption',)
import string
import SCons.Errors
def _validator(key, val, env, vals):
if not val in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s' % (key, val))
def EnumOption(key, help, default, allowed_values, map={}, ignorecase=0):
"""
The input parameters describe a option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Options.Add().
'key' and 'default' are the values to be passed on to Options.Add().
'help' will be appended by the allowed values automatically
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, string.join(allowed_values, '|'))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, string.lower(val), env, vals)
else:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, val, env, vals)
# define converter
if ignorecase == 2:
converter = lambda val, map=map: \
string.lower(map.get(string.lower(val), val))
elif ignorecase == 1:
converter = lambda val, map=map: \
map.get(string.lower(val), val)
else:
converter = lambda val, map=map: \
map.get(val, val)
return (key, help, default, validator, converter)
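# Illustration only: what the converter returned for ignorecase=2 does --
# lookups in 'map' are case-insensitive and the result is always lower-cased.
def _example_enum_option():
    key, help, default, validator, converter = EnumOption(
        'debug', 'debug output and symbols', 'no',
        allowed_values=('yes', 'no', 'full'), map={'all': 'full'}, ignorecase=2)
    assert converter('ALL') == 'full'
    assert converter('Yes') == 'yes'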
| {
"content_hash": "501512379acc9276212f7bfeba73c4d6",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 73,
"avg_line_length": 36.97029702970297,
"alnum_prop": 0.6695232994108195,
"repo_name": "datalogics/scons",
"id": "d4e2ac1934993c6ce8026d9e2a872f274e263424",
"size": "3734",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Options/EnumOption.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4756209"
},
{
"name": "Shell",
"bytes": "13866"
}
],
"symlink_target": ""
} |
import configparser
# Define the names of the options
option_names = [
'from-default',
'from-section', 'section-only',
'file-only', 'init-only', 'init-and-file',
'from-vars',
]
# Initialize the parser with some defaults
DEFAULTS = {
'from-default': 'value from defaults passed to init',
'init-only': 'value from defaults passed to init',
'init-and-file': 'value from defaults passed to init',
'from-section': 'value from defaults passed to init',
'from-vars': 'value from defaults passed to init',
}
parser = configparser.ConfigParser(defaults=DEFAULTS)
print('Defaults before loading file:')
defaults = parser.defaults()
for name in option_names:
if name in defaults:
print(' {:<15} = {!r}'.format(name, defaults[name]))
# Load the configuration file
parser.read('with-defaults.ini')
print('\nDefaults after loading file:')
defaults = parser.defaults()
for name in option_names:
if name in defaults:
print(' {:<15} = {!r}'.format(name, defaults[name]))
# Define some local overrides
vars = {'from-vars': 'value from vars'}
# Show the values of all the options
print('\nOption lookup:')
for name in option_names:
value = parser.get('sect', name, vars=vars)
print(' {:<15} = {!r}'.format(name, value))
# Show error messages for options that do not exist
print('\nError cases:')
try:
print('No such option :', parser.get('sect', 'no-option'))
except configparser.NoOptionError as err:
print(err)
try:
print('No such section:', parser.get('no-sect', 'no-option'))
except configparser.NoSectionError as err:
print(err)
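# Illustration only: a plausible with-defaults.ini for this script (an assumption --
# the real file ships alongside the original example):
#
#   [DEFAULT]
#   file-only = value from defaults in file
#   init-and-file = value from defaults in file
#   from-section = value from defaults in file
#
#   [sect]
#   section-only = value from section in file
#   from-section = value from section in file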
| {
"content_hash": "9a4f06a70a2e4ea82e13ccf75a96a5ee",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 65,
"avg_line_length": 29.29090909090909,
"alnum_prop": 0.6765983860955928,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "6c38ea1c45a5895090b10711a992c70a4599c152",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_application_building_blocks/configparser_defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
from schevo.schema import *
schevo.schema.prep(locals())
import random
import string
class Frob(E.Entity):
"""Some sort of something that has four holes."""
name = f.string()
_key(name)
_sample_unittest = [
('Frob 1',),
('Frob 2',),
('Frob 3',),
('Frob 4',),
]
_sample_unittest_priority = 1
def q_hole_details(self):
def fn():
details = []
for hole in self.m.holes():
if hole.thread_count == 0:
details.append(hole.v.detail())
else:
details.extend(
hole.v.detail(thread)
for thread
in hole.m.threads('hole_a') + hole.m.threads('hole_b')
)
return details
return Q.Simple(fn, 'Hole Details')
class _Create(T.Create):
def _after_execute(self, db, frob):
# Create holes 1, 2, 3, and 4.
for number in xrange(1, 5):
db.execute(db.Hole.t.create(
frob=frob,
number=number,
))
class Hole(E.Entity):
"""A hole on a frob."""
frob = f.entity('Frob', on_delete=CASCADE)
number = f.integer()
@f.integer()
def thread_count(self):
return self.s.count('Thread')
_key(frob, number)
_hide('t_create', 't_delete')
def v_detail(self, thread=None):
return E.Hole._Detail(self, thread)
class _Detail(V.View):
@f.entity('Hole')
def from_hole(self):
return self.s.entity
thread = f.entity('Thread')
to_hole = f.entity('Hole')
@f.float()
def thickness(self):
return getattr(self.thread, 'thickness', UNASSIGNED)
@selectionmethod
def t_delete_selected_threads(cls, selection):
return cls._DeleteSelectedThreads(selection)
def _setup(self, entity, thread=None):
if thread is not None:
self.thread = thread
if thread.hole_a == self.s.entity:
self.to_hole = thread.hole_b
else:
self.to_hole = thread.hole_a
class _DeleteSelectedThreads(T.DeleteSelected):
def _setup(self):
self._selection = [
detail.thread for detail in self._selection
]
class Pairing(E.Entity):
"""A pairing between two frobs."""
frob = f.entity('Frob')
mate = f.entity('Frob')
_key(frob)
_sample_unittest = [
dict(frob=('Frob 1',),
mate=('Frob 4',),
),
dict(frob=('Frob 3',),
mate=('Frob 2',),
),
]
class _Create(T.Create):
def _setup(self):
self.x.need_reverse = True
def _after_execute(self, db, pairing):
# Create a reverse pairing.
if self.x.need_reverse:
tx = db.Pairing.t.create(
frob=pairing.mate,
mate=pairing.frob,
)
tx.x.need_reverse = False
db.execute(tx)
class Thread(E.Entity):
"""A piece of string between two holes."""
hole_a = f.entity('Hole')
hole_b = f.entity('Hole')
thickness = f.float()
_key(hole_a, hole_b)
_sample_unittest = [
dict(hole_a=dict(frob=('Frob 1',), number=1),
hole_b=dict(frob=('Frob 2',), number=1),
thickness=1.0,
),
dict(hole_a=dict(frob=('Frob 1',), number=1),
hole_b=dict(frob=('Frob 3',), number=2),
thickness=1.1,
),
dict(hole_a=dict(frob=('Frob 1',), number=1),
hole_b=dict(frob=('Frob 4',), number=3),
thickness=1.2,
),
dict(hole_a=dict(frob=('Frob 2',), number=2),
hole_b=dict(frob=('Frob 2',), number=3),
thickness=1.3,
),
dict(hole_a=dict(frob=('Frob 2',), number=3),
hole_b=dict(frob=('Frob 3',), number=1),
thickness=1.4,
),
dict(hole_a=dict(frob=('Frob 3',), number=3),
hole_b=dict(frob=('Frob 4',), number=4),
thickness=1.5,
),
]
def t_create_random_frob(name_length=16):
tx = db.Frob.t.create()
tx.name = ''.join(
random.choice(string.uppercase) for x in xrange(name_length))
relabel(tx, 'Create Random Frob')
return tx
| {
"content_hash": "bd77a67ffa29901f4ea10634803e20ac",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 78,
"avg_line_length": 26.627906976744185,
"alnum_prop": 0.47882096069868996,
"repo_name": "Schevo/schevo",
"id": "7f2622b399ddaedf93ccd78873c34bd61560dddb",
"size": "4580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/reference/schema_namespaces/namespaces_001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8687"
},
{
"name": "Python",
"bytes": "954297"
}
],
"symlink_target": ""
} |
"""Process sampling profiler output from hardware model.
USAGE: profile <objdump file> <pc dump file>
Prints a breakdown of time spent per function.
- 'objdump file' parameter points to a file that was produced using:
/usr/local/llvm-nyuzi/bin/llvm-objdump -t <path to ELF file>
- 'pc dump file' points to a file that was produced by the verilog model
using +profile=<filename>. It is a list of hexadecimal program counter
samples, one per line.
"""
import sys
import re
symbolre = re.compile(
r'(?P<addr>[A-Fa-f0-9]+) g\s+F\s+\.text\s+[A-Fa-f0-9]+\s+(?P<symbol>\w+)')
def find_function(functions, pc):
"""Given a PC, figure out which function it is in.
Args:
functions: list of (addr: int, name: str)
Returns:
str Name of function.
Raises:
Nothing
"""
low = 0
high = len(functions)
while low < high:
mid = int((low + high) / 2)
if pc < functions[mid][0]:
high = mid
else:
low = mid + 1
if low == len(functions):
return None
return functions[low - 1][1]
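# Illustration only: find_function([(0x1000, 'start'), (0x2000, 'main'),
# (0x3000, 'memcpy')], 0x2010) returns 'main' -- the closest preceding symbol.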
def main():
counts = {}
functions = []
# Read symbols
with open(sys.argv[1], 'r') as f:
for line in f.readlines():
got = symbolre.search(line)
if got is not None:
sym = got.group('symbol')
functions += [(int(got.group('addr'), 16), sym)]
counts[sym] = 0
functions.sort(key=lambda a: a[0])
# Read profile trace
with open(sys.argv[2], 'r') as f:
for line in f.readlines():
func = find_function(functions, int(line, 16))
if func is not None:
counts[func] += 1
total_cycles = 0
sorted_tab = []
for name in counts:
sorted_tab += [(counts[name], name)]
total_cycles += counts[name]
for count, name in sorted(sorted_tab, key=lambda func: func[0], reverse=True):
if count == 0:
break
print('{:7d} {:.3f}% {}'.format(count, count / total_cycles * 100, name))
if __name__ == '__main__':
main()
| {
"content_hash": "2f7400dcd45a834cb3c04df3f14e9171",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 26.02469135802469,
"alnum_prop": 0.5616698292220114,
"repo_name": "jbush001/NyuziProcessor",
"id": "1c190489f2035ef2f2f0839a6310185aad78b6ee",
"size": "2714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/misc/profile.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "213194"
},
{
"name": "C",
"bytes": "2962697"
},
{
"name": "C++",
"bytes": "1644391"
},
{
"name": "CMake",
"bytes": "49462"
},
{
"name": "Dockerfile",
"bytes": "1642"
},
{
"name": "Java",
"bytes": "6570"
},
{
"name": "Perl",
"bytes": "4771"
},
{
"name": "Python",
"bytes": "203129"
},
{
"name": "Shell",
"bytes": "6099"
},
{
"name": "SystemVerilog",
"bytes": "966291"
},
{
"name": "Tcl",
"bytes": "471"
}
],
"symlink_target": ""
} |
"""
Group of commands to manage the contexts for the current session.
.. moduleauthor:: Daniel Grunwell (grunny)
"""
import click
from zapcli.exceptions import ZAPError
from zapcli.helpers import validate_regex, zap_error_handler
from zapcli.log import console
@click.group(name='context', short_help='Manage contexts for the current session.')
@click.pass_context
def context_group(ctx):
"""Group of commands to manage the contexts for the current session."""
pass
@context_group.command('list')
@click.pass_obj
def context_list(zap_helper):
"""List the available contexts."""
contexts = zap_helper.zap.context.context_list
if len(contexts):
console.info('Available contexts: {0}'.format(contexts[1:-1]))
else:
console.info('No contexts available in the current session')
@context_group.command('new')
@click.argument('name')
@click.pass_obj
def context_new(zap_helper, name):
"""Create a new context."""
console.info('Creating context with name: {0}'.format(name))
res = zap_helper.zap.context.new_context(contextname=name)
console.info('Context "{0}" created with ID: {1}'.format(name, res))
@context_group.command('include')
@click.option('--name', '-n', type=str, required=True,
help='Name of the context.')
@click.option('--pattern', '-p', type=str, callback=validate_regex,
help='Regex to include.')
@click.pass_obj
def context_include(zap_helper, name, pattern):
"""Include a pattern in a given context."""
console.info('Including regex {0} in context with name: {1}'.format(pattern, name))
with zap_error_handler():
result = zap_helper.zap.context.include_in_context(contextname=name, regex=pattern)
if result != 'OK':
raise ZAPError('Including regex from context failed: {}'.format(result))
@context_group.command('exclude')
@click.option('--name', '-n', type=str, required=True,
help='Name of the context.')
@click.option('--pattern', '-p', type=str, callback=validate_regex,
help='Regex to exclude.')
@click.pass_obj
def context_exclude(zap_helper, name, pattern):
"""Exclude a pattern from a given context."""
console.info('Excluding regex {0} from context with name: {1}'.format(pattern, name))
with zap_error_handler():
result = zap_helper.zap.context.exclude_from_context(contextname=name, regex=pattern)
if result != 'OK':
raise ZAPError('Excluding regex from context failed: {}'.format(result))
@context_group.command('info')
@click.argument('context-name')
@click.pass_obj
def context_info(zap_helper, context_name):
"""Get info about the given context."""
with zap_error_handler():
info = zap_helper.get_context_info(context_name)
console.info('ID: {}'.format(info['id']))
console.info('Name: {}'.format(info['name']))
console.info('Authentication type: {}'.format(info['authType']))
console.info('Included regexes: {}'.format(info['includeRegexs']))
console.info('Excluded regexes: {}'.format(info['excludeRegexs']))
@context_group.command('users')
@click.argument('context-name')
@click.pass_obj
def context_list_users(zap_helper, context_name):
"""List the users available for a given context."""
with zap_error_handler():
info = zap_helper.get_context_info(context_name)
users = zap_helper.zap.users.users_list(info['id'])
if len(users):
user_list = ', '.join([user['name'] for user in users])
console.info('Available users for the context {0}: {1}'.format(context_name, user_list))
else:
console.info('No users configured for the context {}'.format(context_name))
@context_group.command('import')
@click.argument('file-path')
@click.pass_obj
def context_import(zap_helper, file_path):
"""Import a saved context file."""
with zap_error_handler():
result = zap_helper.zap.context.import_context(file_path)
if not result.isdigit():
raise ZAPError('Importing context from file failed: {}'.format(result))
console.info('Imported context from {}'.format(file_path))
@context_group.command('export')
@click.option('--name', '-n', type=str, required=True,
help='Name of the context.')
@click.option('--file-path', '-f', type=str,
help='Output file to export the context.')
@click.pass_obj
def context_export(zap_helper, name, file_path):
"""Export a given context to a file."""
with zap_error_handler():
result = zap_helper.zap.context.export_context(name, file_path)
if result != 'OK':
raise ZAPError('Exporting context to file failed: {}'.format(result))
console.info('Exported context {0} to {1}'.format(name, file_path))
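# Illustrative invocations (a sketch of assumed typical usage through the
# `zap-cli` entry point; the context name and pattern below are made up):
#
#     zap-cli context new MyContext
#     zap-cli context include --name MyContext --pattern "http://example\.org/.*"
#     zap-cli context export --name MyContext --file-path /tmp/MyContext.context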
| {
"content_hash": "9ce7aad15cbcd767d67a1873c66bed76",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 96,
"avg_line_length": 35.78195488721804,
"alnum_prop": 0.6677873502836731,
"repo_name": "Grunny/zap-cli",
"id": "4504b8870a13cca17990a2c225d025414354a3ee",
"size": "4759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zapcli/commands/context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "135"
},
{
"name": "Makefile",
"bytes": "390"
},
{
"name": "Python",
"bytes": "103778"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
from math import ceil
listOfFactors = lambda n: {i for i in range(1,ceil(abs(n)/2)+1) if n%i == 0}
def removeDuplicates(mylist):
if mylist:
mylist.sort()
last = mylist[-1]
for i in range(len(mylist)-2, -1, -1):
if last == mylist[i]:
del mylist[i]
else:
last = mylist[i]
return mylist
def polyRoots(polyListCoeff):
allFactors = set()
allFactorsListOld = list(allFactors.union(listOfFactors(polyListCoeff[0]),{polyListCoeff[0]},listOfFactors(polyListCoeff[-1]),{polyListCoeff[-1]}))
allFactorsListOld.extend([-1*i for i in allFactorsListOld])
allFactorsList = list()
for k in allFactorsListOld:
for j in allFactorsListOld:
allFactorsList.append(k/j)
allFactorsList = removeDuplicates(allFactorsList)
polyListCoeff.reverse()
roots = [i for i in allFactorsList if sum([pow(i,j)*polyListCoeff[j] for j in range(0,len(polyListCoeff))]) == 0]
factorList = list()
for i in roots:
if i<0:
factorList.append("(x+{})".format(-i))
else:
factorList.append("(x-{})".format(i))
return "".join(factorList)
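# Minimal usage sketch (assuming integer coefficients given highest degree
# first). Roots are reported as floats under Python 3 because candidate roots
# come from dividing factor pairs.
if __name__ == '__main__':
    print(polyRoots([1, -3, 2]))  # x^2 - 3x + 2 = (x - 1)(x - 2) -> "(x-1.0)(x-2.0)"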
| {
"content_hash": "61f05c3694f162b5ec853f8450e8a19b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 151,
"avg_line_length": 38.16129032258065,
"alnum_prop": 0.6145393068469992,
"repo_name": "ActiveState/code",
"id": "4f33593ea8cbaf697fd7c5b2d751bdb135eb3f7a",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577974_Polynomial_Factoring_Using_Rational_Root/recipe-577974.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import os,sys
import xml.etree.ElementTree as ET
def findObjects(xmlFile):
boxes = []
tree = ET.parse(xmlFile)
for child in tree.getroot().findall('object'):
if child.find('difficult').text != '1':
bn = child.find('bndbox')
box = map(float, [bn.find('xmin').text, bn.find('ymin').text, bn.find('xmax').text, bn.find('ymax').text])
area = (box[2]-box[0])*(box[3]-box[1])
# Skip small objects
if area >= 400.0:
boxes.append( box + [child.find('name').text])
return boxes
## MAIN PROGRAM
def mainProgram():
if len(sys.argv) < 4:
print 'Use: extractImageNetBoxes.py fileList xmlDir bboxOutput'
sys.exit()
imageList = [x.replace('\n','') for x in open(sys.argv[1])]
xmlDir = sys.argv[2]
outputFile = sys.argv[3]
out = open(outputFile,'w')
allBoxes = dict()
for img in imageList:
allBoxes[img] = []
boxes = findObjects(xmlDir+'/'+img)
for box in boxes:
out.write(img.replace('.xml','').replace('./','')+' '+' '.join(map(str,map(int,box[0:4]))) + '\n')
out.close()
if __name__ == "__main__":
mainProgram()
| {
"content_hash": "1a41a40b54a27c85c890d9eb08e50e3c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 112,
"avg_line_length": 28.92105263157895,
"alnum_prop": 0.5978161965423112,
"repo_name": "jccaicedo/localization-agent",
"id": "2662e556eb51781ea1a4b363ad19368aaa385700",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detection/boxes/extractImageNetBoxes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "619"
},
{
"name": "Python",
"bytes": "391367"
},
{
"name": "Shell",
"bytes": "18874"
}
],
"symlink_target": ""
} |
import base64
import sys
import os
import os.path
import difflib
import jinja2
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import re
import unicodedata
import json
import logging
import string
import random
import six.moves.cPickle as pickle
from hashlib import sha1
from datetime import datetime, timedelta
from collections import defaultdict, OrderedDict
import shlex
import socket
from functools import partial
from io import BytesIO
import cgi
import emoji
import tg
import six
import cchardet as chardet
import pkg_resources
from formencode.validators import FancyValidator
from dateutil.parser import parse
from bson import ObjectId
from paste.deploy import appconfig
from pymongo.errors import InvalidId
from contextlib import contextmanager
from tg import tmpl_context as c, app_globals as g
from tg import response, request
from tg.decorators import before_validate
from formencode.variabledecode import variable_decode
import formencode
from markupsafe import Markup
from jinja2.filters import escape, do_filesizeformat
from jinja2.utils import pass_context, htmlsafe_json_dumps
from paste.deploy.converters import asbool, aslist, asint
from webhelpers2 import date, text
from webob.exc import HTTPUnauthorized
from allura.lib import exceptions as exc
from allura.lib import utils
import urllib.parse as urlparse
from urllib.parse import urlencode
import math
from webob.multidict import MultiDict
# import to make available to templates, don't delete:
from .security import has_access, is_allowed_by_role, is_site_admin
log = logging.getLogger(__name__)
# http://stackoverflow.com/questions/2063213/regular-expression-for-validating-dns-label-host-name
# modified to remove capital A-Z and make length parameterized
# and not use lookbehind assertion since JS doesn't support that
dns_var_length = r'^(?![0-9]+$)(?!-)[a-z0-9-]{%s}[a-z0-9]$'
# project & tool names must comply to DNS since used in subdomains for emailing
re_mount_points = {
're_project_name': dns_var_length % '2,14', # validates project, subproject, and user names
're_tool_mount_point': dns_var_length % '0,62', # validates tool mount point names
're_tool_mount_point_fragment': r'[a-z][-a-z0-9]*',
're_relaxed_tool_mount_point': r'^[a-zA-Z0-9][-a-zA-Z0-9_\.\+]{0,62}$',
're_relaxed_tool_mount_point_fragment': r'[a-zA-Z0-9][-a-zA-Z0-9_\.\+]*'
}
# validates project, subproject, and user names
re_project_name = re.compile(re_mount_points['re_project_name'])
# validates tool mount point names
re_tool_mount_point = re.compile(re_mount_points['re_tool_mount_point'])
re_tool_mount_point_fragment = re.compile(re_mount_points['re_tool_mount_point_fragment'])
re_relaxed_tool_mount_point = re.compile(re_mount_points['re_relaxed_tool_mount_point'])
re_relaxed_tool_mount_point_fragment = re.compile(re_mount_points['re_relaxed_tool_mount_point_fragment'])
re_clean_vardec_key = re.compile(r'''\A
( # first part
\w+# name...
(-\d+)?# with optional -digits suffix
)
(\. # next part(s)
\w+# name...
(-\d+)?# with optional -digits suffix
)+
\Z''', re.VERBOSE)
# markdown escaping regexps
re_amp = re.compile(r'''
[&] # amp
(?= # look ahead for:
([a-zA-Z0-9]+;) # named HTML entity
|
(\#[0-9]+;) # decimal entity
|
(\#x[0-9A-F]+;) # hex entity
)
''', re.VERBOSE)
re_leading_spaces = re.compile(r'^[\t ]+', re.MULTILINE)
re_preserve_spaces = re.compile(r'''
[ ] # space
(?=[ ]) # lookahead for a space
''', re.VERBOSE)
re_angle_bracket_open = re.compile('<')
re_angle_bracket_close = re.compile('>')
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!\\\.+-])")
def make_safe_path_portion(ustr, relaxed=True):
"""Return an ascii representation of ``ustr`` that conforms to mount point
naming :attr:`rules <re_tool_mount_point_fragment>`.
Will return an empty string if no char in ``ustr`` is ascii-encodable.
:param relaxed: Use relaxed mount point naming rules (allows more
        characters). See :attr:`re_relaxed_tool_mount_point_fragment`.
:returns: The converted string.
"""
regex = (re_relaxed_tool_mount_point_fragment if relaxed else
re_tool_mount_point_fragment)
ustr = really_unicode(ustr)
s = ustr.encode('ascii', 'ignore')
s = six.ensure_text(s)
if not relaxed:
s = s.lower()
s = '-'.join(regex.findall(s))
s = s.replace('--', '-')
return s
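# Illustrative behaviour (a sketch, assuming plain ASCII-convertible input):
#     make_safe_path_portion('My Wiki!')                 -> 'My-Wiki'
#     make_safe_path_portion('My Wiki!', relaxed=False)  -> 'my-wiki'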
def escape_json(data) -> str:
# Templates should use `|tojson` instead of this
return str(htmlsafe_json_dumps(data)) # str() to keep previous behavior of being str, not MarkupSafe
def querystring(request, url_params):
"""
add/update/remove url parameters. When a value is set to None the key will
be removed from the final constructed url.
:param request: request object
:param url_params: dict with the params that should be updated/added/deleted.
:return: a full url with updated url parameters.
"""
params = urlparse.parse_qs(request.query_string)
params.update(url_params)
for param in list(params.keys()):
if params[param] is None:
del params[param]
# flatten dict values
params = {k: v[0] if isinstance(v, list) else v for k, v in params.items()}
url_parts = urlparse.urlparse(request.url)
url = url_parts._replace(query=urlencode(params)).geturl()
return url
def ceil(number):
return math.ceil(number)
def strip_bad_unicode(s):
"""
xml doesn't like some control characters: https://www.w3.org/TR/REC-xml/#charsets
:param s:
:return:
"""
return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', '', s)
def monkeypatch(*objs):
def patchem(func):
for obj in objs:
setattr(obj, func.__name__, func)
return patchem
def urlquote(url, safe=b"/"):
try:
return six.moves.urllib.parse.quote(str(url), safe=safe)
except UnicodeEncodeError:
return six.moves.urllib.parse.quote(url.encode('utf-8'), safe=safe)
def urlquoteplus(url, safe=b""):
try:
return six.moves.urllib.parse.quote_plus(str(url), safe=safe)
except UnicodeEncodeError:
return six.moves.urllib.parse.quote_plus(url.encode('utf-8'), safe=safe)
def urlquote_path_only(url):
"""
Given a relative url like /fö/bar/?sdf&sdf
urlquote only the path portion of it, leaving any querystring or target hash unquoted
:param url:
:return:
"""
if '?' in url:
url_path, url_joiner, url_remainder = url.partition('?')
elif '#' in url:
url_path, url_joiner, url_remainder = url.partition('#')
else:
url_path = url
url_joiner = url_remainder = ''
return urlquote(url_path) + url_joiner + url_remainder
def _attempt_encodings(s, encodings):
if s is None:
return ''
for enc in encodings:
try:
if enc is None:
if six.PY3 and isinstance(s, bytes):
# special handling for bytes (avoid b'asdf' turning into "b'asfd'")
return s.decode('utf-8')
return str(s) # try default encoding, and handle other types like int, etc
else:
return str(s, enc)
except (UnicodeDecodeError, LookupError):
pass
# Return the repr of the str -- should always be safe
return str(repr(str(s)))[1:-1]
def really_unicode(s):
if isinstance(s, str):
# default case. Also lets Markup() instances be preserved
return s
# Try to guess the encoding
def encodings():
yield None
yield 'utf-8'
yield chardet.detect(s[:1024])['encoding']
yield chardet.detect(s)['encoding']
yield 'latin-1'
return _attempt_encodings(s, encodings())
def find_user(email):
from allura import model as M
return M.User.by_email_address(email)
def find_project(url_path):
from allura import model as M
for n in M.Neighborhood.query.find():
if url_path.strip("/").startswith(n.url_prefix.strip("/")):
break
else:
return None, url_path
# easily off-by-one, might be better to join together everything but
# url_prefix
project_part = n.shortname_prefix + url_path[len(n.url_prefix):]
parts = project_part.split('/')
length = len(parts)
while length:
shortname = '/'.join(parts[:length])
p = M.Project.query.get(shortname=shortname, deleted=False,
neighborhood_id=n._id)
if p:
return p, parts[length:]
length -= 1
return None, url_path.split('/')
def make_neighborhoods(ids):
return _make_xs('Neighborhood', ids)
def make_roles(ids):
return _make_xs('ProjectRole', ids)
def _make_xs(X, ids):
from allura import model as M
X = getattr(M, X)
ids = list(ids)
results = {
r._id: r
for r in X.query.find(dict(_id={'$in': ids}))}
result = (results.get(i) for i in ids)
return (r for r in result if r is not None)
def make_app_admin_only(app):
from allura.model.auth import ProjectRole
admin_role = ProjectRole.by_name('Admin', app.project)
for ace in [ace for ace in app.acl if ace.role_id != admin_role._id]:
app.acl.remove(ace)
@contextmanager
def push_config(obj, **kw):
# if you need similar for a dict, use mock.patch.dict
saved_attrs = {}
new_attrs = []
for k, v in kw.items():
try:
saved_attrs[k] = getattr(obj, k)
except AttributeError:
new_attrs.append(k)
setattr(obj, k, v)
try:
yield obj
finally:
for k, v in saved_attrs.items():
setattr(obj, k, v)
for k in new_attrs:
delattr(obj, k)
def sharded_path(name, num_parts=2):
parts = [
name[:i + 1]
for i in range(num_parts)]
return '/'.join(parts)
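# Illustrative behaviour (a sketch): the first `num_parts` prefixes of the name
# become directory levels, e.g.
#     sharded_path('foobar')               -> 'f/fo'
#     sharded_path('foobar', num_parts=3)  -> 'f/fo/foo'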
def set_context(project_shortname_or_id, mount_point=None, app_config_id=None, neighborhood=None):
"""
Set ``c.project`` and ``c.app`` globals
:param project_id: _id or shortname of a project
:type project_id: ObjectId|str
:param mount_point: mount point to set c.app by
:type mount_point: str
:param app_config_id: alternative to mount_point parameter
:type app_config_id: ObjectId|str
:param neighborhood: neighborhood full name, required if project is specified by shortname
:type neighborhood: str
"""
from allura import model
try:
p = model.Project.query.get(_id=ObjectId(str(project_shortname_or_id)))
except InvalidId:
p = None
if p is None and not isinstance(project_shortname_or_id, ObjectId):
if neighborhood is None:
raise TypeError('neighborhood is required; it must not be None')
if not isinstance(neighborhood, model.Neighborhood):
n = model.Neighborhood.query.get(name=neighborhood)
if n is None:
try:
n = model.Neighborhood.query.get(
_id=ObjectId(str(neighborhood)))
except InvalidId:
pass
if n is None:
raise exc.NoSuchNeighborhoodError(
"Couldn't find neighborhood %s" %
repr(neighborhood))
neighborhood = n
query = dict(shortname=project_shortname_or_id,
neighborhood_id=neighborhood._id)
p = model.Project.query.get(**query)
if p is None:
raise exc.NoSuchProjectError("Couldn't find project %s nbhd %s" %
(project_shortname_or_id, neighborhood))
c.project = p
if app_config_id is None:
c.app = p.app_instance(mount_point)
else:
if isinstance(app_config_id, str):
app_config_id = ObjectId(app_config_id)
app_config = model.AppConfig.query.get(_id=app_config_id)
c.app = p.app_instance(app_config)
@contextmanager
def push_context(project_id, mount_point=None, app_config_id=None, neighborhood=None):
"""
A context manager to set ``c.project`` and ``c.app`` globals temporarily.
To set ``c.user`` or others, use ``push_config(c, user=...)``
:param project_id: _id or shortname of a project
:type project_id: ObjectId|str
:param mount_point: mount point to set c.app by
:type mount_point: str
:param app_config_id: alternative to mount_point parameter
:type app_config_id: ObjectId|str
:param neighborhood: neighborhood full name, required if project is specified by shortname
:type neighborhood: str
"""
project = getattr(c, 'project', ())
app = getattr(c, 'app', ())
set_context(project_id, mount_point, app_config_id, neighborhood)
try:
yield
finally:
if project == ():
del c.project
else:
c.project = project
if app == ():
del c.app
else:
c.app = app
def encode_keys(d):
'''Encodes the unicode keys of d, making the result
a valid kwargs argument'''
return {
six.ensure_str(k): v
for k, v in d.items()}
def vardec(fun):
def vardec_hook(remainder, params):
new_params = variable_decode({
k: v for k, v in params.items()
if re_clean_vardec_key.match(k)})
params.update(new_params)
before_validate(vardec_hook)(fun)
return fun
def convert_bools(conf, prefix=''):
'''
For a given dict, automatically convert any true/false string values into bools.
Only applies to keys starting with the prefix.
:param dict conf:
:param str prefix:
:return: dict
'''
def convert_value(val):
if isinstance(val, str):
if val.strip().lower() == 'true':
return True
elif val.strip().lower() == 'false':
return False
return val
return {
k: (convert_value(v) if k.startswith(prefix) else v)
for k, v in conf.items()
}
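# Illustrative behaviour (a sketch with made-up keys): only keys under the
# prefix are coerced; everything else passes through unchanged.
#     convert_bools({'auth.enabled': 'true', 'auth.name': 'x', 'debug': 'false'},
#                   prefix='auth.')
#     -> {'auth.enabled': True, 'auth.name': 'x', 'debug': 'false'}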
def nonce(length=4):
return sha1(ObjectId().binary + os.urandom(10)).hexdigest()[:length]
def cryptographic_nonce(length=40):
rand_bytes = os.urandom(length)
rand_ints = tuple(rand_bytes)
hex_format = '%.2x' * length
return hex_format % rand_ints
def random_password(length=20, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(length))
def ago(start_time, show_date_after=7):
"""
Return time since starting time as a rounded, human readable string.
E.g., "3 hours ago"
Also works with future times
E.g., "in 3 hours"
"""
if start_time is None:
return 'unknown'
granularities = ['century', 'decade', 'year', 'month', 'day', 'hour', 'minute', 'second']
end_time = datetime.utcnow()
if show_date_after is not None and abs(end_time - start_time) > timedelta(days=show_date_after):
return start_time.strftime('%Y-%m-%d')
while True:
granularity = granularities.pop()
ago = date.distance_of_time_in_words(start_time, end_time, granularity, round=True)
rounded_to_one_granularity = 'and' not in ago
if rounded_to_one_granularity:
break
if (end_time - start_time).total_seconds() >= 0:
return ago + ' ago'
else:
return 'in ' + ago
def ago_ts(timestamp):
return ago(datetime.utcfromtimestamp(timestamp))
def ago_string(s):
try:
return ago(parse(s, ignoretz=True))
except (ValueError, AttributeError, TypeError):
return 'unknown'
class DateTimeConverter(FancyValidator):
def _to_python(self, value, state):
try:
return parse(value)
except (ValueError, TypeError):
if self.if_invalid != formencode.api.NoDefault:
return self.if_invalid
else:
raise
def _from_python(self, value, state):
return value.isoformat()
def absurl(url):
"""
Given a root-relative URL, return a full URL including protocol and host
"""
if url is None:
return None
if '://' in url:
return url
host = tg.config['base_url'].rstrip('/')
return host + url
def diff_text(t1, t2, differ=None):
t1_lines = t1.replace('\r', '').split('\n')
t2_lines = t2.replace('\r', '').split('\n')
t1_words = []
for line in t1_lines:
for word in line.split(' '):
t1_words.append(word)
t1_words.append('\n')
t2_words = []
for line in t2_lines:
for word in line.split(' '):
t2_words.append(word)
t2_words.append('\n')
if differ is None:
differ = difflib.SequenceMatcher(None, t1_words, t2_words)
result = []
def escape_list(words_list):
return [cgi.escape(words) for words in words_list]
for tag, i1, i2, j1, j2 in differ.get_opcodes():
if tag in ('delete', 'replace'):
result += ['<del>'] + escape_list(t1_words[i1:i2]) + ['</del>']
if tag in ('insert', 'replace'):
result += ['<ins>'] + escape_list(t2_words[j1:j2]) + ['</ins>']
if tag == 'equal':
result += escape_list(t1_words[i1:i2])
return Markup(' '.join(result).replace('\n', '<br/>\n'))
def gen_message_id(_id=None):
if not _id:
_id = nonce(40)
if getattr(c, 'project', None):
parts = c.project.url().split('/')[1:-1]
else:
parts = ['mail']
if getattr(c, 'app', None):
addr = '{}.{}'.format(_id, c.app.config.options['mount_point'])
else:
addr = _id
return '{}@{}.{}'.format(
addr, '.'.join(reversed(parts)), tg.config['domain'])
class ProxiedAttrMeta(type):
def __init__(cls, name, bases, dct):
for v in dct.values():
if isinstance(v, attrproxy):
v.cls = cls
class attrproxy:
cls = None
def __init__(self, *attrs):
self.attrs = attrs
def __repr__(self):
return '<attrproxy on {} for {}>'.format(
self.cls, self.attrs)
def __get__(self, obj, klass=None):
if obj is None:
obj = klass
for a in self.attrs:
obj = getattr(obj, a)
return proxy(obj)
def __getattr__(self, name):
if self.cls is None:
return promised_attrproxy(lambda: self.cls, name)
return getattr(
attrproxy(self.cls, *self.attrs),
name)
class promised_attrproxy(attrproxy):
def __init__(self, promise, *attrs):
super().__init__(*attrs)
self._promise = promise
def __repr__(self):
return f'<promised_attrproxy for {self.attrs}>'
def __getattr__(self, name):
cls = self._promise()
return getattr(cls, name)
class proxy:
def __init__(self, obj):
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
class fixed_attrs_proxy(proxy):
"""
On attribute lookup, if keyword parameter matching attribute name was
    provided during object construction, returns its value. Otherwise proxies
to obj.
"""
def __init__(self, obj, **kw):
self._obj = obj
for k, v in kw.items():
setattr(self, k, v)
@tg.expose(content_type='text/plain')
def json_validation_error(controller, **kwargs):
exc = request.validation['exception']
result = dict(status='Validation Error',
errors={fld: str(err) for fld, err in exc.error_dict.items()},
value=exc.value,
params=kwargs)
response.status = 400
return json.dumps(result, indent=2)
def pop_user_notifications(user=None):
from allura import model as M
if user is None:
user = c.user
mbox = M.Mailbox.query.get(user_id=user._id, is_flash=True)
if mbox and mbox.queue:
notifications = M.Notification.query.find(
dict(_id={'$in': mbox.queue}))
mbox.queue = []
mbox.queue_empty = True
for n in notifications:
# clean it up so it doesn't hang around
M.Notification.query.remove({'_id': n._id})
yield n
def config_with_prefix(d, prefix):
    '''Return a subdictionary of keys with a given prefix,
with the prefix stripped
'''
plen = len(prefix)
return {k[plen:]: v for k, v in d.items()
if k.startswith(prefix)}
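# Illustrative behaviour (a sketch with made-up keys):
#     config_with_prefix({'cache.host': 'localhost', 'cache.port': '6379',
#                         'debug': 'false'}, 'cache.')
#     -> {'host': 'localhost', 'port': '6379'}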
def paging_sanitizer(limit, page, total_count=sys.maxsize, zero_based_pages=True):
"""Return limit, page - both converted to int and constrained to
valid ranges based on total_count.
Useful for sanitizing limit and page query params.
See also g.handle_paging which also checks prefs
"""
try:
limit = max(int(limit), 1)
except ValueError:
limit = 25
limit = min(limit, asint(tg.config.get('limit_param_max', 500)))
max_page = (total_count // limit) + (1 if total_count % limit else 0)
max_page = max(0, max_page - (1 if zero_based_pages else 0))
try:
page = int(page or 0)
except ValueError:
page = 0
page = min(max(page, (0 if zero_based_pages else 1)), max_page)
return limit, page
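# Illustrative behaviour (a sketch, assuming the default `limit_param_max` of 500):
#     paging_sanitizer('10', '3', total_count=25)  -> (10, 2)
# i.e. the requested zero-based page 3 is clamped to the last valid page, 2.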
def _add_inline_line_numbers_to_text(txt):
markup_text = '<div class="codehilite"><pre>'
for line_num, line in enumerate(txt.splitlines(), 1):
markup_text = markup_text + \
'<span id="l{}" class="code_block"><span class="lineno">{}</span> {}</span>'.format(
line_num, line_num, line)
markup_text = markup_text + '</pre></div>'
return markup_text
def _add_table_line_numbers_to_text(txt):
def _prepend_whitespaces(num, max_num):
num, max_num = str(num), str(max_num)
diff = len(max_num) - len(num)
return ' ' * diff + num
def _len_to_str_column(l, start=1):
max_num = l + start
return '\n'.join(map(_prepend_whitespaces, list(range(start, max_num)), [max_num] * l))
lines = txt.splitlines(True)
linenumbers = '<td class="linenos"><div class="linenodiv"><pre>' + \
_len_to_str_column(len(lines)) + '</pre></div></td>'
markup_text = '<table class="codehilitetable"><tbody><tr>' + \
linenumbers + '<td class="code"><div class="codehilite"><pre>'
for line_num, line in enumerate(lines, 1):
markup_text = markup_text + \
f'<span id="l{line_num}" class="code_block">{line}</span>'
markup_text = markup_text + '</pre></div></td></tr></tbody></table>'
return markup_text
INLINE = 'inline'
TABLE = 'table'
def render_any_markup(name, txt, code_mode=False, linenumbers_style=TABLE):
"""
renders markdown using allura enhancements if file is in markdown format
renders any other markup format using the pypeline
Returns jinja-safe text
"""
if not txt:
txt = '<p><em>Empty File</em></p>'
else:
fmt = g.pypeline_markup.can_render(name)
txt = really_unicode(txt)
if fmt == 'markdown':
txt = g.markdown.convert(txt)
else:
txt = g.pypeline_markup.render(name, txt)
if not fmt:
if code_mode and linenumbers_style == INLINE:
txt = _add_inline_line_numbers_to_text(txt)
elif code_mode and linenumbers_style == TABLE:
txt = _add_table_line_numbers_to_text(txt)
else:
txt = '<pre>%s</pre>' % txt
return Markup(txt)
@pass_context
def subrender_jinja_filter(context, value):
_template = context.eval_ctx.environment.from_string(value)
result = _template.render(**context)
return result
def nl2br_jinja_filter(value):
result = '<br>\n'.join(escape(line) for line in value.split('\n'))
return Markup(result)
def log_if_changed(artifact, attr, new_val, message):
"""Set `artifact.attr` to `new_val` if changed. Add AuditLog record."""
from allura import model as M
if not hasattr(artifact, attr):
return
if getattr(artifact, attr) != new_val:
M.AuditLog.log(message)
setattr(artifact, attr, new_val)
def get_tool_packages(tool_name):
    "Return packages for a given tool (e.g. 'forgetracker' for 'tickets')"
from allura.app import Application
app = g.entry_points['tool'].get(tool_name.lower())
if not app:
return []
classes = [c for c in app.mro() if c not in (Application, object)]
return [cls.__module__.split('.')[0] for cls in classes]
def get_first(d, key):
"""Return value for d[key][0] if d[key] is a list with elements, else return d[key].
Useful to retrieve values from solr index (e.g. `title` and `text` fields),
which are stored as lists.
"""
v = d.get(key)
if isinstance(v, list):
return v[0] if len(v) > 0 else None
return v
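# Illustrative behaviour (a sketch with a made-up solr-style document):
#     get_first({'title': ['My Page']}, 'title')  -> 'My Page'
#     get_first({'title': []}, 'title')           -> None
#     get_first({'title': 'My Page'}, 'title')    -> 'My Page'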
def datetimeformat(value, format='%Y-%m-%d %H:%M:%S'):
return value.strftime(format)
@contextmanager
def log_output(log):
# TODO: replace with contextlib.redirect_stdout and redirect_stderr?
class Writer:
def __init__(self, func):
self.func = func
self.closed = False
def write(self, buf):
self.func(buf)
def flush(self):
pass
_stdout = sys.stdout
_stderr = sys.stderr
sys.stdout = Writer(log.info)
sys.stderr = Writer(log.error)
try:
yield log
finally:
sys.stdout = _stdout
sys.stderr = _stderr
def topological_sort(items, partial_order):
"""Perform topological sort.
items is a list of items to be sorted.
partial_order is a list of pairs. If pair (a,b) is in it, it means
that item a should appear before item b.
Returns a list of the items in one of the possible orders, or None
if partial_order contains a loop.
Modified from: http://www.bitformation.com/art/python_toposort.html
"""
# Original topological sort code written by Ofer Faigon
# (www.bitformation.com) and used with permission
def add_arc(graph, fromnode, tonode):
"""Add an arc to a graph. Can create multiple arcs.
The end nodes must already exist."""
graph[fromnode].append(tonode)
# Update the count of incoming arcs in tonode.
graph[tonode][0] = graph[tonode][0] + 1
# step 1 - create a directed graph with an arc a->b for each input
# pair (a,b).
# The graph is represented by a dictionary. The dictionary contains
# a pair item:list for each node in the graph. /item/ is the value
# of the node. /list/'s 1st item is the count of incoming arcs, and
# the rest are the destinations of the outgoing arcs. For example:
# {'a':[0,'b','c'], 'b':[1], 'c':[1]}
# represents the graph: c <-- a --> b
# The graph may contain loops and multiple arcs.
# Note that our representation does not contain reference loops to
# cause GC problems even when the represented graph contains loops,
# because we keep the node names rather than references to the nodes.
graph = defaultdict(lambda: [0])
for a, b in partial_order:
add_arc(graph, a, b)
# Step 2 - find all roots (nodes with zero incoming arcs).
roots = [n for n in items if graph[n][0] == 0]
roots.reverse() # keep sort stable
# step 3 - repeatedly emit a root and remove it from the graph. Removing
# a node may convert some of the node's direct children into roots.
# Whenever that happens, we append the new roots to the list of
# current roots.
sorted = []
while roots:
# If len(roots) is always 1 when we get here, it means that
# the input describes a complete ordering and there is only
# one possible output.
# When len(roots) > 1, we can choose any root to send to the
# output; this freedom represents the multiple complete orderings
# that satisfy the input restrictions. We arbitrarily take one of
# the roots using pop(). Note that for the algorithm to be efficient,
# this operation must be done in O(1) time.
root = roots.pop()
sorted.append(root)
for child in graph[root][1:]:
graph[child][0] = graph[child][0] - 1
if graph[child][0] == 0:
roots.append(child)
del graph[root]
if len(graph) > 0:
# There is a loop in the input.
return None
return sorted
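# Illustrative behaviour (a sketch):
#     topological_sort(['a', 'b', 'c'], [('a', 'b'), ('b', 'c')])  -> ['a', 'b', 'c']
#     topological_sort(['a', 'b'], [('a', 'b'), ('b', 'a')])       -> None  (loop)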
@contextmanager
def ming_config(**conf):
r"""Temporarily swap in a new ming configuration, restoring the previous
one when the contextmanager exits.
:param \*\*conf: keyword arguments defining the new ming configuration
"""
import ming
from ming.session import Session
datastores = Session._datastores
try:
utils.configure_ming(conf)
yield
finally:
Session._datastores = datastores
for name, session in Session._registry.items():
session.bind = datastores.get(name, None)
session._name = name
@contextmanager
def ming_config_from_ini(ini_path):
"""Temporarily swap in a new ming configuration, restoring the previous
one when the contextmanager exits.
:param ini_path: Path to ini file containing the ming configuration
"""
root = pkg_resources.get_distribution('allura').location
conf = appconfig('config:%s' % os.path.join(root, ini_path))
with ming_config(**conf):
yield
def shlex_split(string):
# py2/3 compatibility
return [six.ensure_text(s) for s in shlex.split(six.ensure_str(string))]
def split_select_field_options(field_options):
try:
field_options = shlex_split(field_options)
except ValueError:
field_options = field_options.split()
# After regular split field_options might contain a " characters,
# which would break html when rendered inside tag's value attr.
# Escaping doesn't help here, 'cause it breaks EasyWidgets' validation,
# so we're getting rid of those.
field_options = [o.replace('"', '') for o in field_options]
return field_options
@contextmanager
def notifications_disabled(project, disabled=True):
"""Temporarily disable email notifications on a project.
"""
orig = project.notifications_disabled
try:
project.notifications_disabled = disabled
yield
finally:
project.notifications_disabled = orig
@contextmanager
def null_contextmanager(returning=None, *args, **kw):
"""A no-op contextmanager.
"""
yield returning
class exceptionless:
'''Decorator making the decorated function return 'error_result' on any
exceptions rather than propagating exceptions up the stack
'''
def __init__(self, error_result, log=None):
self.error_result = error_result
self.log = log
def __call__(self, fun):
fname = 'exceptionless(%s)' % fun.__name__
def inner(*args, **kwargs):
try:
return fun(*args, **kwargs)
except Exception as e:
if self.log:
self.log.exception(
'Error calling %s(args=%s, kwargs=%s): %s',
fname, args, kwargs, str(e))
return self.error_result
inner.__name__ = str(fname)
return inner
def urlopen(url, retries=3, codes=(408, 500, 502, 503, 504), timeout=None):
"""Open url, optionally retrying if an error is encountered.
Socket and other IO errors will always be retried if retries > 0.
HTTP errors are retried if the error code is passed in ``codes``.
    :param retries: Number of times to retry.
:param codes: HTTP error codes that should be retried.
"""
attempts = 0
while True:
try:
return six.moves.urllib.request.urlopen(url, timeout=timeout)
except OSError as e:
no_retry = isinstance(e, six.moves.urllib.error.HTTPError) and e.code not in codes
if attempts < retries and not no_retry:
attempts += 1
continue
else:
try:
url_string = url.get_full_url() # if url is Request obj
except Exception:
url_string = url
if hasattr(e, 'filename') and url_string != e.filename:
url_string += f' => {e.filename}'
if timeout is None:
timeout = socket.getdefaulttimeout()
if getattr(e, 'fp', None):
body = e.fp.read()
else:
body = ''
log.exception(
'Failed after %s retries on url with a timeout of %s: %s: %s',
attempts, timeout, url_string, body[:250])
raise e
def plain2markdown(txt, preserve_multiple_spaces=False, has_html_entities=False):
if not has_html_entities:
        # prevent &foo; and &#123; from becoming HTML entities
        txt = re_amp.sub('&amp;', txt)
# avoid accidental 4-space indentations creating code blocks
if preserve_multiple_spaces:
txt = txt.replace('\t', ' ' * 4)
        txt = re_preserve_spaces.sub('&nbsp;', txt)
else:
txt = re_leading_spaces.sub('', txt)
try:
# try to use html2text for most of the escaping
import html2text
html2text.BODY_WIDTH = 0
txt = html2text.escape_md_section(txt, snob=True)
except ImportError:
# fall back to just escaping any MD-special chars
txt = md_chars_matcher_all.sub(r"\\\1", txt)
# prevent < and > from becoming tags
    txt = re_angle_bracket_open.sub('&lt;', txt)
    txt = re_angle_bracket_close.sub('&gt;', txt)
return txt
OrderedDefaultDict = defaultdict # py3.7 dicts are always ordered
def iter_entry_points(group, *a, **kw):
"""Yields entry points that have not been disabled in the config.
If ``group`` is "allura" (Allura tool entry points) or one of subgroups
(e.g. "allura.phone"), this function also checks for multiple entry points
with the same name. If there are multiple entry points with the same name,
and one of them is a subclass of the other(s), it will be yielded, and the
other entry points with that name will be ignored. If a subclass is not
found, an ImportError will be raised.
This treatment of "allura" and "allura.*" entry points allows tool authors
to subclass another tool while reusing the original entry point name.
"""
def active_eps():
disabled = aslist(
tg.config.get('disable_entry_points.' + group), sep=',')
return [ep for ep in pkg_resources.iter_entry_points(group, *a, **kw)
if ep.name not in disabled]
def unique_eps(entry_points):
by_name = OrderedDefaultDict(list)
for ep in entry_points:
by_name[ep.name].append(ep)
for name, eps in by_name.items():
ep_count = len(eps)
if ep_count == 1:
yield eps[0]
else:
yield subclass(eps)
def subclass(entry_points):
loaded = {ep: ep.load() for ep in entry_points}
for ep, cls in loaded.items():
others = list(loaded.values())[:]
others.remove(cls)
if all([issubclass(cls, other) for other in others]):
return ep
raise ImportError('Ambiguous [allura] entry points detected. ' +
'Multiple entry points with name "%s".' % entry_points[0].name)
is_allura = group == 'allura' or group.startswith('allura.')
return iter(unique_eps(active_eps()) if is_allura else active_eps())
# http://stackoverflow.com/a/1060330/79697
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
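# Illustrative behaviour (a sketch): the end date is exclusive, e.g.
#     list(daterange(datetime(2023, 1, 1), datetime(2023, 1, 4)))
# yields the 1st, 2nd and 3rd of January 2023.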
@contextmanager
def login_overlay(exceptions=None):
"""
Override the default behavior of redirecting to the auth.login_url and
instead display an overlay with content from auth.login_fragment_url.
    This is to allow pages that require authentication for any action, but
    not for the initial view, to make it more apparent what you will get once
    logged in.
    This should be wrapped around a call to `require_access()` (presumably in
    the `_check_security()` method on a controller). The `exceptions` param
    can be given a list of exposed views that should keep the original behavior.
For example::
class MyController(BaseController);
def _check_security(self):
with login_overlay(exceptions=['process']):
require_access(self.neighborhood, 'register')
@expose
def index(self, *args, **kw):
return {}
@expose
def list(self, *args, **kw):
return {}
@expose
def process(self, *args, **kw):
return {}
This would show the overlay to unauthenticated users who visit `/`
or `/list` but would perform the normal redirect when `/process` is
visited.
"""
try:
yield
except HTTPUnauthorized:
if exceptions:
for exception in exceptions:
if request.path.rstrip('/').endswith('/%s' % exception):
raise
c.show_login_overlay = True
def unidiff(old, new):
    """Returns unified diff between `old` and `new`."""
return '\n'.join(difflib.unified_diff(
a=old.splitlines(),
b=new.splitlines(),
fromfile='old',
tofile='new',
lineterm=''))
def auditlog_user(message, *args, **kwargs):
"""
Create an audit log entry for a user, including the IP address
:param str message:
:param user: a :class:`allura.model.auth.User`
"""
from allura import model as M
ip_address = utils.ip_address(request)
message = f'IP Address: {ip_address}\nUser-Agent: {request.user_agent}\n' + message
if c.user and kwargs.get('user') and kwargs['user'] != c.user:
message = f'Done by user: {c.user.username}\n' + message
return M.AuditLog.log_user(message, *args, **kwargs)
def get_user_status(user):
'''
Get user status based on disabled and pending attrs
:param user: a :class:`allura.model.auth.User`
'''
disabled = user.disabled
pending = user.pending
if not disabled and not pending:
return 'enabled'
elif disabled:
return 'disabled'
elif pending:
return 'pending'
def rate_limit(cfg_opt, artifact_count, start_date, exception=None):
"""
Check the various config-defined artifact creation rate limits, and if any
are exceeded, raise exception.
:param artifact_count: a number or callable (for lazy evaluation)
"""
if exception is None:
exception = exc.RatelimitError
rate_limits = json.loads(tg.config.get(cfg_opt, '{}'))
now = datetime.utcnow()
for rate, count in rate_limits.items():
age = now - start_date
age = (age.microseconds + (age.seconds + age.days * 24 * 3600) * 10 ** 6) / 10 ** 6
if age < int(rate):
if callable(artifact_count):
artifact_count = artifact_count()
if artifact_count >= count:
raise exception()
def base64uri(content_or_image, image_format='PNG', mimetype='image/png', windows_line_endings=False):
if hasattr(content_or_image, 'save'):
output = BytesIO()
content_or_image.save(output, format=image_format)
content = output.getvalue()
else:
content = content_or_image
if windows_line_endings:
content = content.replace('\n', '\r\n')
data = six.ensure_text(base64.b64encode(six.ensure_binary(content)))
return f'data:{mimetype};base64,{data}'
def slugify(name, allow_periods=False):
"""
Returns a tuple with slug and lowered slug based on name
"""
RE_NON_ALPHA_ETC = re.compile(r'[^.\w]+' if allow_periods else r'[^\w]+')
slug = RE_NON_ALPHA_ETC.sub('-', # replace non ". alphanum_" sequences into single -
                                unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode().replace("'", '') # asciify & strip apostrophes. https://stackoverflow.com/a/53261200
).strip('-') # leading - or trailing - gets removed
return slug, slug.lower()
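# Illustrative behaviour (a sketch):
#     slugify('Hello World')                    -> ('Hello-World', 'hello-world')
#     slugify('v1.2 beta', allow_periods=True)  -> ('v1.2-beta', 'v1.2-beta')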
email_re = re.compile(r'(([a-z0-9_]|\-|\.)+)@([\w\.-]+)', re.IGNORECASE)
def hide_private_info(message):
if asbool(tg.config.get('hide_private_info', 'true')) and message:
hidden = email_re.sub(r'\1@...', message)
if type(message) not in (str,):
# custom subclass like markupsafe.Markup, convert to that type again
hidden = type(message)(hidden)
return hidden
else:
return message
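# Illustrative behaviour (a sketch, assuming the default `hide_private_info`
# config setting is enabled):
#     hide_private_info('Contact bob@example.com')  -> 'Contact bob@...'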
def emojize(text):
    """Converts emoji codes to unicode emojis"""
return emoji.emojize(text, language="alias")
def get_current_reaction(react_users_dict):
"""Return current selected reaction for given react_users dict"""
return utils.get_key_from_value(react_users_dict, c.user.username)
def username_project_url(user_or_username):
from allura.lib import plugin
url = None
if not user_or_username:
return url
if isinstance(user_or_username, str):
class UserName:
def __init__(self, username):
self.username = username
username = user_or_username
auth_provider = plugin.AuthenticationProvider.get(request)
try:
# in 99% of cases, we can get away without a DB lookup
url = auth_provider.user_project_url(UserName(username))
except AttributeError:
user = auth_provider.by_username(username)
url = user.url()
else:
user = user_or_username
url = user.url()
return f'{url}profile/'
def pluralize_tool_name(tool_name: string, count: int):
pluralize_tools = ['Wiki', 'Discussion', 'Blog']
if tool_name is not None and tool_name in pluralize_tools:
return f"{tool_name}{'s'[:count^1]}"
return tool_name
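# Illustrative behaviour (a sketch):
#     pluralize_tool_name('Wiki', 1)     -> 'Wiki'
#     pluralize_tool_name('Wiki', 2)     -> 'Wikis'
#     pluralize_tool_name('Tickets', 2)  -> 'Tickets'  (not in the pluralize list)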
| {
"content_hash": "19f7f8f6ebd646498784029fc4a2e41d",
"timestamp": "",
"source": "github",
"line_count": 1320,
"max_line_length": 189,
"avg_line_length": 32.08712121212121,
"alnum_prop": 0.6121119112265376,
"repo_name": "apache/allura",
"id": "51d867ebca69ff69d8848615678645795674ec29",
"size": "43225",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Allura/allura/lib/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
import mock
from twisted.trial import unittest
from twisted.internet import defer
from buildbot.test.fake import fakedb
from buildbot.process.users import manual
class ManualUsersMixin(object):
"""
This class fakes out the master/db components to test the manual
user managers located in process.users.manual.
"""
class FakeMaster(object):
def __init__(self):
self.db = fakedb.FakeDBConnector(self)
self.slavePortnum = "tcp:9989"
self.caches = mock.Mock(name="caches")
self.caches.get_cache = self.get_cache
def get_cache(self, cache_name, miss_fn):
c = mock.Mock(name=cache_name)
c.get = miss_fn
return c
def setUpManualUsers(self):
self.master = self.FakeMaster()
class TestUsersBase(unittest.TestCase):
"""
    Not really sure what there is to test, aside from setUpManualUsers getting
self.master set.
"""
pass
class TestCommandlineUserManagerPerspective(unittest.TestCase, ManualUsersMixin):
def setUp(self):
self.setUpManualUsers()
def call_perspective_commandline(self, *args):
persp = manual.CommandlineUserManagerPerspective(self.master)
return persp.perspective_commandline(*args)
def test_perspective_commandline_add(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x', 'git': 'x'}])
def check_get(_):
d = self.master.db.users.getUser(1)
def real_check(usdict):
self.assertEqual(usdict, dict(uid=1,
identifier='x',
bb_username=None,
bb_password=None,
git='x'))
d.addCallback(real_check)
return d
d.addCallback(check_get)
return d
def test_perspective_commandline_update(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x', 'svn':'x'}])
d.addCallback(lambda _ :
self.call_perspective_commandline(
'update', None, None, None,
[{'identifier':'x', 'svn':'y'}]))
def check(_):
d = self.master.db.users.getUser(1)
def real_check(usdict):
self.assertEqual(usdict, dict(uid=1,
identifier='x',
bb_username=None,
bb_password=None,
svn='y'))
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_update_bb(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x',
'svn':'x'}])
d.addCallback(lambda _ :
self.call_perspective_commandline(
'update', 'bb_user',
'hashed_bb_pass', None,
[{'identifier':'x'}]))
def check(_):
d = self.master.db.users.getUser(1)
def real_check(usdict):
self.assertEqual(usdict, dict(uid=1,
identifier='x',
bb_username='bb_user',
bb_password='hashed_bb_pass',
svn='x'))
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_update_both(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x',
'svn':'x'}])
d.addCallback(lambda _ :
self.call_perspective_commandline(
'update', 'bb_user',
'hashed_bb_pass', None,
[{'identifier':'x',
'svn':'y'}]))
def check(_):
d = self.master.db.users.getUser(1)
def real_check(usdict):
self.assertEqual(usdict, dict(uid=1,
identifier='x',
bb_username='bb_user',
bb_password='hashed_bb_pass',
svn='y'))
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_remove(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'h@c',
'git': 'hi <h@c>'}])
d.addCallback(lambda _ :
self.call_perspective_commandline('remove', None,
None, ['x'], None))
def check(_):
d = self.master.db.users.getUser('x')
def real_check(res):
self.assertEqual(res, None)
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_get(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x',
'svn':'x'}])
d.addCallback(lambda _ :
self.call_perspective_commandline('get', None, None,
['x'], None))
def check(_):
d = self.master.db.users.getUser(1)
def real_check(res):
self.assertEqual(res, dict(uid=1,
identifier='x',
bb_username=None,
bb_password=None,
svn='x'))
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_get_multiple_attrs(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier': 'x',
'svn': 'x',
'git': 'x@c'}])
d.addCallback(lambda _ :
self.call_perspective_commandline('get', None, None,
['x'], None))
def check(_):
d = self.master.db.users.getUser(1)
def real_check(res):
self.assertEqual(res, dict(uid=1,
identifier='x',
bb_username=None,
bb_password=None,
svn='x',
git='x@c'))
d.addCallback(real_check)
return d
d.addCallback(check)
return d
def test_perspective_commandline_add_format(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x', 'svn':'x'}])
def check(result):
exp_format = "user(s) added:\nidentifier: x\nuid: 1\n\n"
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
def test_perspective_commandline_update_format(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x', 'svn':'x'}])
d.addCallback(lambda _ :
self.call_perspective_commandline('update', None, None, None,
[{'identifier':'x',
'svn':'y'}]))
def check(result):
exp_format = 'user(s) updated:\nidentifier: x\n'
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
def test_perspective_commandline_remove_format(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'h@c',
'git': 'hi <h@c>'}])
d.addCallback(lambda _ : self.call_perspective_commandline('remove',
None, None,
['h@c'],
None))
def check(result):
exp_format = "user(s) removed:\nidentifier: h@c\n"
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
def test_perspective_commandline_get_format(self):
d = self.call_perspective_commandline('add', None, None, None,
[{'identifier':'x@y', 'git': 'x <x@y>'}])
d.addCallback(lambda _ :
self.call_perspective_commandline('get', None, None,
['x@y'], None))
def check(result):
exp_format = 'user(s) found:\ngit: x <x@y>\nidentifier: x@y\n' \
'bb_username: None\nuid: 1\n\n'
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
def test_perspective_commandline_remove_no_match_format(self):
d = self.call_perspective_commandline('remove', None, None, ['x'], None)
def check(result):
exp_format = "user(s) removed:\n"
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
def test_perspective_commandline_get_no_match_format(self):
d = self.call_perspective_commandline('get', None, None, ['x'], None)
def check(result):
exp_format = "user(s) found:\nno match found\n"
self.assertEqual(result, exp_format)
d.addCallback(check)
return d
class TestCommandlineUserManager(unittest.TestCase, ManualUsersMixin):
def setUp(self):
self.setUpManualUsers()
self.manual_component = manual.CommandlineUserManager(username="user",
passwd="userpw",
port="9990")
self.manual_component.master = self.master
def test_no_userpass(self):
d = defer.maybeDeferred(lambda : manual.CommandlineUserManager())
return self.assertFailure(d, AssertionError)
def test_no_port(self):
d = defer.maybeDeferred(lambda : manual.CommandlineUserManager(username="x",
passwd="y"))
return self.assertFailure(d, AssertionError)
def test_service(self):
# patch out the pbmanager's 'register' command both to be sure
# the registration is correct and to get a copy of the factory
registration = mock.Mock()
registration.unregister = lambda : defer.succeed(None)
self.master.pbmanager = mock.Mock()
def register(portstr, user, passwd, factory):
self.assertEqual([portstr, user, passwd],
['9990', 'user', 'userpw'])
self.got_factory = factory
return registration
self.master.pbmanager.register = register
self.manual_component.startService()
persp = self.got_factory(mock.Mock(), 'user')
self.failUnless(isinstance(persp, manual.CommandlineUserManagerPerspective))
return self.manual_component.stopService()
| {
"content_hash": "a20398d5e9a5d239076dad3c4e7740e7",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 87,
"avg_line_length": 44.06643356643357,
"alnum_prop": 0.45028961358406727,
"repo_name": "denny820909/builder",
"id": "9b33005185db294ee62bbb75c036e3d4bd2c62e7",
"size": "13448",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_process_users_manual.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
import zookeeper
import threading
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
zookeeper.set_debug_level(zookeeper.LOG_LEVEL_WARN)
# Mapping of connection state values to human strings.
STATE_NAME_MAPPING = {
zookeeper.ASSOCIATING_STATE: "associating",
zookeeper.AUTH_FAILED_STATE: "auth-failed",
zookeeper.CONNECTED_STATE: "connected",
zookeeper.CONNECTING_STATE: "connecting",
zookeeper.EXPIRED_SESSION_STATE: "expired",
}
# Mapping of event type to human string.
TYPE_NAME_MAPPING = {
zookeeper.NOTWATCHING_EVENT: "not-watching",
zookeeper.SESSION_EVENT: "session",
zookeeper.CREATED_EVENT: "created",
zookeeper.DELETED_EVENT: "deleted",
zookeeper.CHANGED_EVENT: "changed",
zookeeper.CHILD_EVENT: "child",
}
class TimeoutException(zookeeper.ZooKeeperException):
pass
def logevent(h,typ, state, path):
logger.debug("event,handle:%d, type:%s, state:%s, path:%s", h, TYPE_NAME_MAPPING.get(typ, "unknown"), STATE_NAME_MAPPING.get(state, "unknown"), path)
class ZKClient:
def __init__(self, servers, timeout= 10):
self.timeout = timeout
self.connected = False
self.handle = -1
self.servers = servers
self.watchers = set()
self._lock = threading.Lock()
        self.conn_cv = threading.Condition()
def start(self):
self.handle = zookeeper.init(self.servers, self.connection_watcher, self.timeout * 1000)
self.conn_cv.acquire()
self.conn_cv.wait(self.timeout)
self.conn_cv.release()
if not self.connected:
raise TimeoutException
def stop(self):
return zookeeper.close(self.handle)
def connection_watcher(self, h, typ, state, path):
logevent(h, typ, state, path)
if typ == zookeeper.SESSION_EVENT:
if state == zookeeper.CONNECTED_STATE:
self.handle = h
with self._lock:
self.connected = True
watchers = list(self.watchers)
for watcher in watchers:
watcher.watch()
self.conn_cv.acquire()
self.conn_cv.notifyAll()
self.conn_cv.release()
def del_watcher(self, watcher):
with self._lock:
self.watchers.discard(watcher)
def add_watcher(self, watcher):
with self._lock:
self.watchers.add(watcher)
if self.connected:
watcher.watch()
class DataWatch:
def __init__(self, client, path, func):
self._client = client
self._path = path
self._func = func
self._stopped = False
client.add_watcher(self)
def watcher(self, h, typ, state, path):
logevent(h, typ, state, path)
self.watch()
def _do(self):
data, stat = zookeeper.get(self._client.handle, self._path, self.watcher)
return self._func(data, stat)
def watch(self):
if self._stopped:
return
try:
result = self._do()
if result is False:
self._stopped = True
except zookeeper.NoNodeException:
raise
except zookeeper.ZooKeeperException as e:
logger.error("ZooKeeperException, type:%s, msg: %s", type(e), e)
class ChildrenWatch(DataWatch):
def _do(self):
children = zookeeper.get_children(self._client.handle, self._path, self.watcher)
return self._func(children)
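# Minimal usage sketch (assumes a ZooKeeper server reachable at 127.0.0.1:2181
# and an existing "/config" znode; the names below are illustrative only):
if __name__ == '__main__':
    def on_config(data, stat):
        # returning False from the callback would stop the watch re-registering
        logger.warning("data changed: %r", data)

    client = ZKClient("127.0.0.1:2181", timeout=5)
    client.start()  # blocks until connected, or raises TimeoutException
    DataWatch(client, "/config", on_config)
    # ... the process must stay alive here for further watch events to fire ...
    client.stop()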
| {
"content_hash": "584a01a1bb66bf09f485cb7e8fbac695",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 153,
"avg_line_length": 29.613445378151262,
"alnum_prop": 0.6118047673098751,
"repo_name": "buaabarty/dpark",
"id": "699f747bac6d08d40cedff14beabc36f2a564fb4",
"size": "3524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpark/pymesos/zkpython.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''
tests/runtests.py
Unit test runner. Without any arguments, this runs all available tests. Flags
may be used to selectively run tests, or just show some diagnostic information.
Log output is automatically captured by this script.
'''
import functools
import io
import logging
import os
import sys
import unittest
if __name__ == '__main__':
# add import path and load with proper absolute imports
test_dir = os.path.dirname(__file__)
project_dir = os.path.abspath(os.path.join(test_dir, '..'))
sys.path.insert(0, project_dir)
from cli.args import base_cli_argparser
from lib.util.log import get_smart_truncate_formatter
else:
try:
# use relative imports to avoid messing with import paths
from ..cli.args import base_cli_argparser
from ..lib.util.log import get_smart_truncate_formatter
except (ImportError, ValueError):
# whatever, we tried
pass
logger = logging.getLogger('sublime-ycmd.' + __name__)
# custom log level - used when logging results from the unittest TestRunner
LOGLEVEL_NOTICE = 100
LOGLEVELNAME_NOTICE = 'NOTICE'
def configure_logging(log_level=None, output_stream=None):
'''
Configures the logging module for running tests. This automatically binds
the helpers in lib.logutils and captures output into a specified file.
Supplying None to the parameters will result in defaults.
'''
if log_level is None:
log_level = logging.WARNING
if output_stream is None:
output_stream = sys.stderr
logging.addLevelName(LOGLEVEL_NOTICE, LOGLEVELNAME_NOTICE)
logger_instance = logging.getLogger('sublime-ycmd')
logger_instance.propagate = False
# handler/filter will decide how to filter, so log everything here:
logger_instance.setLevel(logging.DEBUG)
if logger_instance.hasHandlers():
logger_handlers_old = [h for h in logger_instance.handlers]
for logger_handler in logger_handlers_old:
logger_instance.removeHandler(logger_handler)
logger_stream = logging.StreamHandler(stream=output_stream)
logger_stream.setLevel(log_level)
logger_formatter = get_smart_truncate_formatter()
logger_stream.setFormatter(logger_formatter)
logger_instance.addHandler(logger_stream)
def get_test_suite_items(test_suite):
'''
Generates a flattened iterator of all registered tests from the supplied
test suite. Use it for pretty-printing/logging only.
'''
assert isinstance(test_suite, unittest.TestSuite), \
'[internal] test_suite is not a unittest.TestSuite: %r' % test_suite
def get_subitems(test_suite_item):
''' Helper that returns the sub-items of a single test suite item. '''
if isinstance(test_suite_item, unittest.TestSuite):
return get_test_suite_items(test_suite_item)
elif isinstance(test_suite_item, unittest.TestCase):
return [str(test_suite_item)]
logger.warning('unknown test suite item type: %r', test_suite_item)
return []
test_suite_items = \
functools.reduce(lambda sum, cur: sum + get_subitems(cur),
test_suite, [])
return test_suite_items
def get_cli_argparser():
'''
Generates and returns an argparse.ArgumentParser instance for use with
parsing test-related options.
'''
parser = base_cli_argparser(
description='sublime-ycmd unit test runner',
)
testing_group = parser.add_argument_group(title='tests')
testing_group.add_argument(
'-l', '--list', action='store_true',
help='lists available tests (does not run them)',
)
testing_group.add_argument(
'-t', '--test', nargs='+',
help='runs only the specified tests',
)
return parser
class TestRunnerLogStream(io.TextIOBase):
'''
File stream wrapper class for use with TestRunner.
Instances of this class will accept messages written by a test runner
and then log them in the custom NOTICE log level. This produces nicely
formatted messages from the test runner output.
'''
def __init__(self):
super(TestRunnerLogStream, self).__init__()
self._buffer = ''
def consume_line(self):
'''
Returns a complete buffered line, if one exists, and removes it from
the buffer. If a complete line does not exist, this returns None, and
no modifications are made to the buffer.
'''
buffered_lines = self._buffer.splitlines()
if len(buffered_lines) <= 1:
return None
first_line = buffered_lines[0]
remaining_buffer = '\n'.join(buffered_lines[1:])
self._buffer = remaining_buffer
return first_line
@staticmethod
def testrunner_log(*args):
'''
Dummy wrapper around the logger log statement. This method exists to
provide a better funcName in the log record.
'''
logger.log(LOGLEVEL_NOTICE, *args)
def write(self, s):
'''
Receives messages and logs them using the test runner log level.
'''
nbytes = len(s)
self._buffer += s
buffered_line = self.consume_line()
while buffered_line:
self.testrunner_log(buffered_line)
buffered_line = self.consume_line()
return nbytes
def close(self):
'''
Receives the 'close' event. This writes out any pending buffered data
and then calls the parent 'close' method.
'''
for buffered_line in self._buffer.splitlines():
self.testrunner_log(buffered_line)
super(TestRunnerLogStream, self).close()
def main():
'''
Main method. Discovers and runs tests in the 'tests' subdirectory.
'''
cli_argparser = get_cli_argparser()
cli_args = cli_argparser.parse_args()
if cli_args.list not in [None, False]:
raise cli_argparser.error('Test listing is not yet implemented')
configure_logging(cli_args.log_level, cli_args.log_file)
logger.debug('initialized logger, about to load tests')
test_dir = os.path.dirname(__file__)
project_dir = os.path.join(test_dir, '..')
test_suite = unittest.defaultTestLoader.discover(
'tests', pattern='*.py', top_level_dir=project_dir,
)
logger.info('loaded %d tests: %s', test_suite.countTestCases(),
get_test_suite_items(test_suite))
logger.debug('about to run tests')
test_runner_logstream = TestRunnerLogStream()
test_runner = unittest.TextTestRunner(
stream=test_runner_logstream, verbosity=2,
)
unittest.installHandler()
test_runner.run(test_suite)
if __name__ == '__main__':
main()
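# Example invocations (illustrative; flag handling beyond --help is defined
# above and in cli.args.base_cli_argparser):
#
#   python tests/runtests.py          # discover and run every test under tests/
#   python tests/runtests.py --help   # show the available flags (-l/--list, -t/--test, ...)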
| {
"content_hash": "8c3179d17765ce7a0aed7348b9d3e456",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 79,
"avg_line_length": 31.81042654028436,
"alnum_prop": 0.6571811680572109,
"repo_name": "sublime-ycmd/sublime-ycmd",
"id": "399800dd198fdb2152127473688727c2140f50a4",
"size": "6735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315808"
}
],
"symlink_target": ""
} |
"""Extensions which are maintained in-tree.
All public modules (those not beginning with ``_``) in this package are
extensions. They could, in concept, be maintained separately from HIL
core, but are in-tree as they are maintained by the core developers.
"""
| {
"content_hash": "e9dd6185d363bde4eb864e567510e1e0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 71,
"avg_line_length": 43.333333333333336,
"alnum_prop": 0.7615384615384615,
"repo_name": "SahilTikale/haas",
"id": "2ea1ac7def316505fb83fec27e63eacc28a278bd",
"size": "260",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "hil/ext/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "451"
},
{
"name": "Python",
"bytes": "357764"
}
],
"symlink_target": ""
} |
from datetime import datetime
import json
import os
from boto.s3.key import Key
from fabric.api import local, require, settings, task
from fabric.state import env
from termcolor import colored
import app_config
# Other fabfiles
import assets
import data
import flat
import issues
import render
import text
import utils
if app_config.DEPLOY_TO_SERVERS:
import servers
if app_config.DEPLOY_CRONTAB:
import cron_jobs
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
import bootstrap
"""
Base configuration
"""
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
env.settings = None
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
"""
Run as though on production.
"""
env.settings = 'production'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
@task
def staging():
"""
Run as though on staging.
"""
env.settings = 'staging'
app_config.configure_targets(env.settings)
env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
"""
Work on stable branch.
"""
env.branch = 'stable'
@task
def master():
"""
Work on development branch.
"""
env.branch = 'master'
@task
def branch(branch_name):
"""
Work on any specified branch.
"""
env.branch = branch_name
"""
Running the app
"""
@task
def app(port='8000'):
"""
Serve app.py.
"""
if env.settings:
local("DEPLOYMENT_TARGET=%s bash -c 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app'" % (env.settings, port))
else:
local('gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app' % port)
@task
def public_app(port='8001'):
"""
Serve public_app.py.
"""
if env.settings:
local("DEPLOYMENT_TARGET=%s bash -c 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload public_app:wsgi_app'" % (env.settings, port))
else:
local('gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload public_app:wsgi_app' % port)
@task
def tests():
"""
Run Python unit tests.
"""
local('nosetests')
"""
Deployment
Changes to deployment require a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
@task
def update():
"""
Update all application data not in repository (copy, assets, etc).
"""
text.update()
assets.sync()
data.update()
@task
def deploy(quick=None, remote='origin', reload=False):
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging])
if app_config.DEPLOY_TO_SERVERS:
require('branch', provided_by=[stable, master, branch])
if (app_config.DEPLOYMENT_TARGET == 'production' and env.branch != 'stable'):
utils.confirm(
colored("You are trying to deploy the '%s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env.branch, "red")
)
servers.checkout_latest(remote)
servers.fabcast('text.update')
servers.fabcast('assets.sync')
servers.fabcast('data.update')
if app_config.DEPLOY_CRONTAB:
servers.install_crontab()
if app_config.DEPLOY_SERVICES:
servers.deploy_confs()
if quick != 'quick':
update()
render.render_all()
# Clear files that should never be deployed
local('rm -rf www/live-data')
flat.deploy_folder(
app_config.S3_BUCKET,
'www',
app_config.PROJECT_SLUG,
headers={
'Cache-Control': 'max-age=%i' % app_config.DEFAULT_MAX_AGE
},
ignore=['www/assets/*', 'www/live-data/*']
)
flat.deploy_folder(
app_config.S3_BUCKET,
'www/assets',
'%s/assets' % app_config.PROJECT_SLUG,
headers={
'Cache-Control': 'max-age=%i' % app_config.ASSETS_MAX_AGE
}
)
if reload:
reset_browsers()
if not check_timestamp():
reset_browsers()
@task
def check_timestamp():
require('settings', provided_by=[production, staging])
bucket = utils.get_bucket(app_config.S3_BUCKET)
k = Key(bucket)
k.key = '%s/live-data/timestamp.json' % app_config.PROJECT_SLUG
if k.exists():
return True
else:
return False
@task
def reset_browsers():
"""
Deploy a timestamp so the client will reset their page. For bugfixes
"""
require('settings', provided_by=[production, staging])
if not os.path.exists('www/live-data'):
os.makedirs('www/live-data')
payload = {}
now = datetime.now().strftime('%s')
payload['timestamp'] = now
with open('www/live-data/timestamp.json', 'w') as f:
json.dump(payload, f)
flat.deploy_folder(
app_config.S3_BUCKET,
'www/live-data',
'%s/live-data' % app_config.PROJECT_SLUG,
headers={
'Cache-Control': 'max-age=%i' % app_config.DEFAULT_MAX_AGE
}
)
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
"""
Deletes the app from s3
"""
require('settings', provided_by=[production, staging])
utils.confirm(
colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?')" % app_config.DEPLOYMENT_TARGET, "red")
)
with settings(warn_only=True):
flat.delete_folder(app_config.S3_BUCKET, app_config.PROJECT_SLUG)
if app_config.DEPLOY_TO_SERVERS:
servers.delete_project()
if app_config.DEPLOY_CRONTAB:
servers.uninstall_crontab()
if app_config.DEPLOY_SERVICES:
servers.nuke_confs()
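# Typical invocations (illustrative; the environment, branch and task names are
# the ones defined above):
#
#   fab staging master deploy        # update copy/assets/data, render and push to S3
#   fab production stable deploy     # same, against the production targets
#   fab staging master deploy:quick  # skip the update()/render_all() steps
#   fab staging reset_browsers       # publish a new timestamp to force client reloads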
| {
"content_hash": "6b62fa36da086439d3f3caac22342607",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 182,
"avg_line_length": 23.343396226415095,
"alnum_prop": 0.6273844164241836,
"repo_name": "mroswell/m2016",
"id": "d1470f639ec51d786e805e2f60fbf3cd9572571a",
"size": "6209",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "fabfile/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "361483"
},
{
"name": "HTML",
"bytes": "318253"
},
{
"name": "JavaScript",
"bytes": "674055"
},
{
"name": "Nginx",
"bytes": "136"
},
{
"name": "Python",
"bytes": "86717"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
} |
from datetime import date
from openpyxl import load_workbook
if __name__ == '__main__':
wb = load_workbook('LiborCurveBootstrap.xlsx')
ws = wb.active
# Take the input parameters
today = ws['C2'].value.date()
libor_tenor = ws['C3'].value
libor_value = ws['C4'].value
# OIS Data
ois_startdate = today
ois_maturities = []
ois_mktquotes = []
for cell in list(ws.iter_rows('B8:C37')):
ois_maturities.append(cell[0].value)
ois_mktquotes.append(cell[1].value)
# Swap Data
swap_startdate = today
swap_maturities = []
swap_mktquotes = []
for cell in list(ws.iter_rows('E8:F40')):
swap_maturities.append(cell[0].value)
swap_mktquotes.append(cell[1].value)
# Output Dates
output_dates = []
for cell in list(ws.iter_rows('H8:H40')):
output_dates.append(cell[0].value.date())
# YOUR CODE HERE .... The result of your code must be a variable of type list whose name
# must be output_results. The length of this list has to be the same of output_dates
# END OF YOUR CODE
# Write results
# A variable named output_results of type list, with the same length of output_dates, is expected.
# In case this is not present, a message is written
    if 'output_results' not in locals():
        output_results = ["Not Successful" for x in range(len(output_dates))]
    out_list = list(ws.iter_rows('I8:I40'))
    for i in range(len(output_results)):
        out_list[i][0].value = output_results[i]
# A new file with the results is created
    wb.save("LiborCurveBootstrap_output.xlsx")
| {
"content_hash": "034e0afb43a01dd68595ea912634ce12",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 102,
"avg_line_length": 29.962962962962962,
"alnum_prop": 0.6390605686032138,
"repo_name": "gabberthomson/fm_finpy",
"id": "9fe763d30d9d302bcb37421d284b921b3bf37569",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "liborbootstrap_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40348"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
            # in that case sample_size is nsentences, so "loss" above is the
            # per-sentence loss and "nll_loss" is the per-output-token loss
return agg_output
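# Toy illustration of the aggregation above (the numbers are made up):
#
#   logging_outputs = [
#       {"loss": 2.0, "ntokens": 10, "nsentences": 2, "sample_size": 10,
#        "correct": 7, "total": 10, "nframes": 100},
#       {"loss": 3.0, "ntokens": 12, "nsentences": 3, "sample_size": 12,
#        "correct": 9, "total": 12, "nframes": 120},
#   ]
#   agg = CrossEntropyWithAccCriterion.aggregate_logging_outputs(logging_outputs)
#   # agg["acc"] == (7 + 9) * 100.0 / (10 + 12); here agg["loss"] is bits per token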
| {
"content_hash": "4e54bf1bd57324c0e19e1f9214f1327f",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 85,
"avg_line_length": 41.552,
"alnum_prop": 0.5987678090103966,
"repo_name": "pytorch/fairseq",
"id": "7c4d8ba3802a2da9467c42b0aa18653c7bbb2ec9",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/speech_recognition/criterions/cross_entropy_acc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
"""py.test configuration file."""
import os
import pytest
import xal
@pytest.fixture(scope='session', params=['local', 'fabric'])
def session(request):
# Absolute path to current working directory. This is useful to setup
# working directory in tests in order to use fixtures.
here = os.path.abspath(os.getcwd())
xal_session = None
if request.param == 'local':
xal_session = xal.LocalSession()
elif request.param == 'fabric':
xal_session = xal.FabricSession(host='localhost')
context = xal_session.path.cd(here)
context.__enter__()
return xal_session
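# Example of a test that would consume the fixture above (illustrative only;
# the exact xal calls are assumptions about the xal API):
#
#   def test_echo(session):
#       result = session.sh.run("echo -n hello")
#       assert result.stdout == "hello"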
| {
"content_hash": "b0712ef9e2c4812ac36b7e10530cbd1b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 28.904761904761905,
"alnum_prop": 0.6738056013179572,
"repo_name": "benoitbryon/xal",
"id": "f42ffb8707736714733e434f7abd8be0927f2976",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1418"
},
{
"name": "Python",
"bytes": "63885"
},
{
"name": "Shell",
"bytes": "562"
}
],
"symlink_target": ""
} |
import os
import sys
import config
import common
import shutil
# This function empties the target directory
def freeTarget():
print common.encodeChinese("清空 target 目录 ...")
targetPath = '%s%s' % ( config.PROJECT_HOME, '\\target')
    # If the target directory exists, delete it
if os.path.isdir( targetPath ):
try:
shutil.rmtree( targetPath )
print common.encodeChinese("> 完成!")
print '-------------------------------'
print ''
except Exception as inst :
print common.encodeChinese(config.ERR600)
if not config.AUTOTEST:
common.pause()
os._exit(0) | {
"content_hash": "769f30cd379992a6ac5638ae797cee55",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 24.454545454545453,
"alnum_prop": 0.6524163568773235,
"repo_name": "xxd3vin/spp-sdk",
"id": "c85f9c106158a51782a5aad8b0c225133dd63228",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/script/python/freetarget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "759663"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "56155"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "JavaScript",
"bytes": "163687"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Pascal",
"bytes": "8738"
},
{
"name": "Python",
"bytes": "22177886"
},
{
"name": "Shell",
"bytes": "15704"
},
{
"name": "Tcl",
"bytes": "2065501"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import argparse
import autopy
import socket
import time
from sockutil import set_tcp_keepalive
def handleCommand(command):
# print command # XXX
if command == 'D':
print 'Page Down'
autopy.key.tap(autopy.key.K_PAGEDOWN)
elif command == 'U':
print 'Page Up'
autopy.key.tap(autopy.key.K_PAGEUP)
def connectAndListen(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Turn on keepalive with a 1 second timeout, 1 attempt (not set on Mac and
# Windows), and 1 second interval between tries (not set on Mac). Small values
    # because some network noise is better than having performance ruined by a
    # page that didn't turn.
try:
sock.connect((host, port))
set_tcp_keepalive(sock, True, 1, 1, 1)
except socket.error as error:
print 'Failed to connect:', error
return
print 'Connected'
while True:
char = ""
try:
char = sock.recv(1)
except socket.error:
pass
if not char:
print 'Connection lost'
return
handleCommand(char)
def main(args):
while True:
print 'Attempting to connect to', args.host + ':' + str(args.port)
connectAndListen(args.host, args.port)
time.sleep(1)
# TODO: Handle ctrl-c
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Remote page turn client')
parser.add_argument('host', type=str,
help='the host, typically running Bitwig Studio and the \
McMillen12Step-bitwig controller script')
parser.add_argument('-p', '--port', type=int, default=32313,
help='the port to connect to (default 32313)')
main(parser.parse_args())
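# Example invocation (the address is a placeholder for the machine running the
# controller script):
#
#   python PageTurner.py 192.168.0.42 -p 32313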
| {
"content_hash": "80fc437dc908e71679e9c7f4477cdb2f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 28.910714285714285,
"alnum_prop": 0.6757257566399012,
"repo_name": "aarmea/McMillen12Step-bitwig",
"id": "46c4011eb1584c278fa3d360f106d03bd544e93d",
"size": "1642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/PageTurner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "12032"
},
{
"name": "Python",
"bytes": "6680"
}
],
"symlink_target": ""
} |
import pecan
import wsmeext.pecan as wsme_pecan
from cloudkitty.api.v1 import types as ck_types
from cloudkitty import rating
from cloudkitty.rating.hash.datamodels import group as group_models
from cloudkitty.rating.hash.datamodels import threshold as threshold_models
from cloudkitty.rating.hash.db import api as db_api
class HashMapThresholdsController(rating.RatingRestControllerBase):
    """Controller responsible for threshold management.
"""
_custom_actions = {
'group': ['GET']}
@wsme_pecan.wsexpose(group_models.Group,
ck_types.UuidType())
def group(self, threshold_id):
"""Get the group attached to the threshold.
:param threshold_id: UUID of the threshold to filter on.
"""
hashmap = db_api.get_instance()
try:
group_db = hashmap.get_group_from_threshold(
uuid=threshold_id)
return group_models.Group(**group_db.export_model())
except db_api.ThresholdHasNoGroup as e:
pecan.abort(404, e.args[0])
@wsme_pecan.wsexpose(threshold_models.ThresholdCollection,
ck_types.UuidType(),
ck_types.UuidType(),
ck_types.UuidType(),
bool,
ck_types.UuidType(),
bool,
status_code=200)
def get_all(self,
service_id=None,
field_id=None,
group_id=None,
no_group=False,
tenant_id=None,
filter_tenant=False):
"""Get the threshold list
:param service_id: Service UUID to filter on.
:param field_id: Field UUID to filter on.
:param group_id: Group UUID to filter on.
:param no_group: Filter on orphaned thresholds.
:param tenant_id: Tenant UUID to filter on.
:param filter_tenant: Explicitly filter on tenant (default is to not
filter on tenant). Useful if you want to filter on tenant being None.
        :return: List of all thresholds.
"""
hashmap = db_api.get_instance()
threshold_list = []
search_opts = dict()
if filter_tenant:
search_opts['tenant_uuid'] = tenant_id
thresholds_uuid_list = hashmap.list_thresholds(
service_uuid=service_id,
field_uuid=field_id,
group_uuid=group_id,
no_group=no_group,
**search_opts)
for threshold_uuid in thresholds_uuid_list:
threshold_db = hashmap.get_threshold(uuid=threshold_uuid)
threshold_list.append(threshold_models.Threshold(
**threshold_db.export_model()))
res = threshold_models.ThresholdCollection(thresholds=threshold_list)
return res
@wsme_pecan.wsexpose(threshold_models.Threshold,
ck_types.UuidType())
def get_one(self, threshold_id):
"""Return a threshold.
:param threshold_id: UUID of the threshold to filter on.
"""
hashmap = db_api.get_instance()
try:
threshold_db = hashmap.get_threshold(uuid=threshold_id)
return threshold_models.Threshold(
**threshold_db.export_model())
except db_api.NoSuchThreshold as e:
pecan.abort(404, e.args[0])
@wsme_pecan.wsexpose(threshold_models.Threshold,
body=threshold_models.Threshold,
status_code=201)
def post(self, threshold_data):
"""Create a threshold.
        :param threshold_data: Information about the threshold to create.
"""
hashmap = db_api.get_instance()
try:
threshold_db = hashmap.create_threshold(
level=threshold_data.level,
map_type=threshold_data.map_type,
cost=threshold_data.cost,
field_id=threshold_data.field_id,
group_id=threshold_data.group_id,
service_id=threshold_data.service_id,
tenant_id=threshold_data.tenant_id)
pecan.response.location = pecan.request.path_url
if pecan.response.location[-1] != '/':
pecan.response.location += '/'
pecan.response.location += threshold_db.threshold_id
return threshold_models.Threshold(
**threshold_db.export_model())
except db_api.ThresholdAlreadyExists as e:
pecan.abort(409, e.args[0])
except db_api.ClientHashMapError as e:
pecan.abort(400, e.args[0])
@wsme_pecan.wsexpose(None,
ck_types.UuidType(),
body=threshold_models.Threshold,
status_code=302)
def put(self, threshold_id, threshold):
"""Update a threshold.
:param threshold_id: UUID of the threshold to update.
:param threshold: Threshold data to insert.
"""
hashmap = db_api.get_instance()
try:
hashmap.update_threshold(
threshold_id,
threshold_id=threshold.threshold_id,
level=threshold.level,
cost=threshold.cost,
map_type=threshold.map_type,
group_id=threshold.group_id,
tenant_id=threshold.tenant_id)
pecan.response.headers['Location'] = pecan.request.path
except db_api.ThresholdAlreadyExists as e:
pecan.abort(409, e.args[0])
except db_api.NoSuchThreshold as e:
pecan.abort(404, e.args[0])
except db_api.ClientHashMapError as e:
pecan.abort(400, e.args[0])
@wsme_pecan.wsexpose(None,
ck_types.UuidType(),
status_code=204)
def delete(self, threshold_id):
"""Delete a threshold.
:param threshold_id: UUID of the threshold to delete.
"""
hashmap = db_api.get_instance()
try:
hashmap.delete_threshold(uuid=threshold_id)
except db_api.NoSuchThreshold as e:
pecan.abort(404, e.args[0])
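# Summary of the operations exposed above (the URL prefix depends on where the
# hashmap module mounts this controller, and the UUIDs are placeholders):
#
#   GET    .../thresholds                               list thresholds (filterable)
#   GET    .../thresholds/<threshold_id>                fetch a single threshold
#   GET    .../thresholds/group?threshold_id=<uuid>     group attached to a threshold
#   POST   .../thresholds                               create one (201 + Location)
#   PUT    .../thresholds/<threshold_id>                update one (302)
#   DELETE .../thresholds/<threshold_id>                delete one (204)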
| {
"content_hash": "4e31269c2f81312aaabbb1f4518a9816",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 77,
"avg_line_length": 38.34567901234568,
"alnum_prop": 0.5692208628461043,
"repo_name": "openstack/cloudkitty",
"id": "f5a730553afaea515a254584aa8a128a23e6874d",
"size": "6844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/rating/hash/controllers/threshold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "4904"
},
{
"name": "Python",
"bytes": "1046196"
},
{
"name": "Shell",
"bytes": "16361"
}
],
"symlink_target": ""
} |
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.mininode import CTransaction, COIN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
sync_mempools,
)
def tx_from_hex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [['-deprecatedrpc=accounts']] * 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getaccountaddress("from1"): 0.33,
self.nodes[1].getaccountaddress("toself"): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid, "account": "from1"})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid, "account": ""})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid, "account": "toself"})
pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category": "receive", "amount": Decimal("0.1")},
{"txid": txid, "account": "watchonly"})
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = tx_from_hex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| {
"content_hash": "4047e9996fbec9d7dbf61f0823ca902c",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 114,
"avg_line_length": 51.28431372549019,
"alnum_prop": 0.572739437965972,
"repo_name": "donaloconnor/bitcoin",
"id": "883942cc19d9e883f8dab6f61b159432660df025",
"size": "10676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_listtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "682817"
},
{
"name": "C++",
"bytes": "5672414"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "197067"
},
{
"name": "Makefile",
"bytes": "114403"
},
{
"name": "Objective-C",
"bytes": "148160"
},
{
"name": "Objective-C++",
"bytes": "6763"
},
{
"name": "Python",
"bytes": "1342159"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "75866"
}
],
"symlink_target": ""
} |
from .. import Parser, parser, get_active_lines, LegacyItemAccess
from insights.specs import Specs
@parser(Specs.rhn_hibernate_conf)
class RHNHibernateConf(LegacyItemAccess, Parser):
def parse_content(self, content):
"""
        Parses rhn_hibernate.conf and stores the result as a dict in ``self.data``, e.g.
        - {
              "hibernate.c3p0.min_size": '5',
              "hibernate.c3p0.preferredTestQuery": "select 'c3p0 ping' from dual"
          }
"""
hb_dict = {}
for line in get_active_lines(content):
if '=' in line:
key, _, value = line.partition('=')
hb_dict[key.strip()] = value.strip()
self.data = hb_dict
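# Illustrative access once the parser has been fed rhn_hibernate.conf (the
# parser instance and its context are normally supplied by the insights
# framework):
#
#   conf = RHNHibernateConf(context)
#   conf['hibernate.c3p0.min_size']                # -> '5', via LegacyItemAccess
#   conf.get('hibernate.c3p0.preferredTestQuery')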
| {
"content_hash": "85f48beab71a1878075736ebb033f3f3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.5752212389380531,
"repo_name": "RedHatInsights/insights-core",
"id": "b342775526b205a97333c2d5a1c3bec78dacdea8",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/rhn_hibernate_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
} |
from __future__ import division
from math import sqrt
import kafka_utils.kafka_cluster_manager.cluster_info.stats as stats
def test_mean():
assert stats.mean([1, 2, 3, 4, 5]) == 3
def test_variance():
assert stats.variance([1, 2, 3, 4, 5]) == 2
def test_standard_deviation():
assert stats.standard_deviation([1, 2, 3, 4, 5]) == sqrt(2)
def test_coefficient_of_variation():
assert stats.coefficient_of_variation([1, 2, 3, 4, 5]) == sqrt(2) / 3
def test_get_net_imbalance_balanced_equal():
assert stats.get_net_imbalance([3, 3, 3, 3, 3]) == 0
def test_get_net_imbalance_balanced_unequal():
assert stats.get_net_imbalance([3, 4, 3, 4, 3]) == 0
def test_get_net_imbalance_imbalanced_equal():
assert stats.get_net_imbalance([3, 2, 3, 4, 3]) == 1
def test_get_net_imbalance_imbalanced_unequal():
assert stats.get_net_imbalance([3, 2, 4, 4, 4]) == 1
def test_get_extra_element_count_optimal_none_allowed():
assert stats.get_extra_element_count(2, 2, 0) == (0, 0)
def test_get_extra_element_count_optimal_one_allowed():
assert stats.get_extra_element_count(2, 2, 1) == (0, 1)
def test_get_extra_element_count_extra_none_allowed():
assert stats.get_extra_element_count(3, 2, 0) == (1, 0)
def test_get_extra_element_count_extra_one_allowed():
assert stats.get_extra_element_count(3, 2, 1) == (0, 0)
def test_get_extra_element_count_multiple_extra_one_allowed():
assert stats.get_extra_element_count(4, 2, 1) == (1, 0)
def test_get_replication_group_imbalance_stats(create_cluster_topology):
ct = create_cluster_topology()
rgs = [ct.rgs['rg1'], ct.rgs['rg2']]
partitions = ct.partitions.values()
net_imbalance, extra_replica_cnt_per_rg = \
stats.get_replication_group_imbalance_stats(rgs, partitions)
assert extra_replica_cnt_per_rg['rg1'] == 1
assert extra_replica_cnt_per_rg['rg2'] == 1
assert net_imbalance == 2
def test_get_broker_partition_counts(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
counts = stats.get_broker_partition_counts(brokers)
assert counts == [4, 5, 6, 3, 1]
def test_get_broker_weights(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
weights = stats.get_broker_weights(brokers)
assert weights == [24.0, 26.0, 27.0, 12.0, 8.0]
def test_get_broker_leader_counts(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
counts = stats.get_broker_leader_counts(brokers)
assert counts == [4, 1, 2, 0, 0]
def test_get_broker_leader_weights(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
weights = stats.get_broker_leader_weights(brokers)
assert weights == [24.0, 2.0, 9.0, 0.0, 0.0]
def test_get_topic_imbalance_stats(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
topics = ct.topics.values()
net_imbalance, extra_partition_cnt_per_broker = \
stats.get_topic_imbalance_stats(brokers, topics)
expected_extra = {'0': 0, '1': 1, '2': 1, '3': 1, '4': 0}
assert net_imbalance == 3
assert extra_partition_cnt_per_broker == expected_extra
def test_get_weighted_topic_imbalance_stats(create_cluster_topology):
ct = create_cluster_topology()
brokers = sorted(ct.brokers.values(), key=lambda b: b.id)
topics = ct.topics.values()
total_imbalance, weighted_imbalance_per_broker = \
stats.get_weighted_topic_imbalance_stats(brokers, topics)
# 97 is the total weight of the cluster.
expected = {
'0': 0,
'1': 45 / 97,
'2': 10 / 97,
'3': 36 / 97,
'4': 0,
}
expected_imbalance = 91 / 97
assert abs(total_imbalance - expected_imbalance) < 1e-05
assert weighted_imbalance_per_broker == expected
def test_get_partition_movement_stats(create_cluster_topology):
ct = create_cluster_topology()
base_assignment = ct.assignment
# Move (T0,0) 2 -> 4
# Move (T0,1) 3 -> 1
# Move (T3,0) 1 -> 3
# Move (T3,0) 2 -> 4
# Change leader (T1, 0) 0 -> 1
new_assignment = {
(u'T0', 0): ['1', '4'],
(u'T0', 1): ['2', '1'],
(u'T1', 0): ['1', '0', '2', '3'],
(u'T1', 1): ['0', '1', '2', '3'],
(u'T2', 0): ['2'],
(u'T3', 0): ['0', '3', '4'],
(u'T3', 1): ['0', '1', '4'],
}
ct.update_cluster_topology(new_assignment)
movement_count, movement_size, leader_changes = \
stats.get_partition_movement_stats(ct, base_assignment)
assert movement_count == 4
assert movement_size == 23.0
assert leader_changes == 1
| {
"content_hash": "e1bf7f47f9d0fb4358b31ca3d9e8b363",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 73,
"avg_line_length": 28.6,
"alnum_prop": 0.6293706293706294,
"repo_name": "anthonysandrin/kafka-utils",
"id": "382a8f8f25a97b54d7bcbe09e8ee652f18226991",
"size": "5459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kafka_cluster_manager/stats_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "10840"
},
{
"name": "Makefile",
"bytes": "523"
},
{
"name": "Python",
"bytes": "741325"
},
{
"name": "Shell",
"bytes": "1734"
}
],
"symlink_target": ""
} |
import array
from collections import defaultdict
import numpy as np
import tensorflow as tf
def load_vocab(filename):
vocab = None
with open(filename) as f:
vocab = f.read().splitlines()
dct = defaultdict(int)
for idx, word in enumerate(vocab):
dct[word] = idx
return [vocab, dct]
def load_glove_vectors(filename, vocab):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
tf.logging.info("Found {} out of {} vectors in Glove".format(num_vectors, len(vocab)))
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, embedding_dim):
initial_embeddings = np.random.uniform(-0.25, 0.25, (len(vocab_dict), embedding_dim)).astype("float32")
for word, glove_word_idx in glove_dict.items():
word_idx = vocab_dict.get(word)
initial_embeddings[word_idx, :] = glove_vectors[glove_word_idx]
return initial_embeddings
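# Illustrative wiring of the helpers above (the file names and the embedding
# dimension are assumptions):
#
#   vocab, vocab_dict = load_vocab("data/vocabulary.txt")
#   glove_vectors, glove_dict = load_glove_vectors("data/glove.6B.100d.txt", set(vocab))
#   embeddings = build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors,
#                                               embedding_dim=100)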
| {
"content_hash": "0bb077c9af22fc10f983f0d989e997cf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 107,
"avg_line_length": 33.630434782608695,
"alnum_prop": 0.6160310277957337,
"repo_name": "excel-analytics/telegram_chat_bot",
"id": "6bb0d88b7a12ea2e73c29ef11b20af4c4da7dac7",
"size": "1547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retrieval_based/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105247"
}
],
"symlink_target": ""
} |
import os
import eventlet
from eventlet import event
from eventlet.green import socket
from tests import LimitedTestCase, s2b, skip_if_no_ssl
certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
class TestServe(LimitedTestCase):
def setUp(self):
super(TestServe, self).setUp()
from eventlet import debug
debug.hub_exceptions(False)
def tearDown(self):
super(TestServe, self).tearDown()
from eventlet import debug
debug.hub_exceptions(True)
def test_exiting_server(self):
# tests that the server closes the client sock on handle() exit
def closer(sock,addr):
pass
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, closer)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(s2b('a'))
self.assertFalse(client.recv(100))
gt.kill()
def test_excepting_server(self):
# tests that the server closes the client sock on handle() exception
def crasher(sock,addr):
sock.recv(1024)
0//0
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, crasher)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(s2b('a'))
self.assertRaises(ZeroDivisionError, gt.wait)
self.assertFalse(client.recv(100))
def test_excepting_server_already_closed(self):
        # same as above, but with an explicit close before the crash
def crasher(sock,addr):
sock.recv(1024)
sock.close()
0//0
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, crasher)
client = eventlet.connect(('localhost', l.getsockname()[1]))
client.sendall(s2b('a'))
self.assertRaises(ZeroDivisionError, gt.wait)
self.assertFalse(client.recv(100))
def test_called_for_each_connection(self):
hits = [0]
def counter(sock, addr):
hits[0]+=1
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, counter)
for i in xrange(100):
client = eventlet.connect(('localhost', l.getsockname()[1]))
self.assertFalse(client.recv(100))
gt.kill()
self.assertEqual(100, hits[0])
def test_blocking(self):
l = eventlet.listen(('localhost', 0))
x = eventlet.with_timeout(0.01,
eventlet.serve, l, lambda c,a: None,
timeout_value="timeout")
self.assertEqual(x, "timeout")
def test_raising_stopserve(self):
def stopit(conn, addr):
raise eventlet.StopServe()
l = eventlet.listen(('localhost', 0))
# connect to trigger a call to stopit
gt = eventlet.spawn(eventlet.connect,
('localhost', l.getsockname()[1]))
eventlet.serve(l, stopit)
gt.wait()
def test_concurrency(self):
evt = event.Event()
def waiter(sock, addr):
sock.sendall(s2b('hi'))
evt.wait()
l = eventlet.listen(('localhost', 0))
gt = eventlet.spawn(eventlet.serve, l, waiter, 5)
def test_client():
c = eventlet.connect(('localhost', l.getsockname()[1]))
# verify the client is connected by getting data
self.assertEquals(s2b('hi'), c.recv(2))
return c
clients = [test_client() for i in xrange(5)]
# very next client should not get anything
x = eventlet.with_timeout(0.01,
test_client,
timeout_value="timed out")
self.assertEquals(x, "timed out")
@skip_if_no_ssl
def test_wrap_ssl(self):
server = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)),
certfile=certificate_file,
keyfile=private_key_file, server_side=True)
port = server.getsockname()[1]
def handle(sock,addr):
sock.sendall(sock.recv(1024))
raise eventlet.StopServe()
eventlet.spawn(eventlet.serve, server, handle)
client = eventlet.wrap_ssl(eventlet.connect(('localhost', port)))
client.sendall("echo")
self.assertEquals("echo", client.recv(1024))
def test_socket_reuse(self):
lsock1 = eventlet.listen(('localhost',0))
port = lsock1.getsockname()[1]
def same_socket():
return eventlet.listen(('localhost',port))
self.assertRaises(socket.error,same_socket)
lsock1.close()
assert same_socket()
| {
"content_hash": "b6cb03722ac87761a746993115031e1e",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 36.45454545454545,
"alnum_prop": 0.580423940149626,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "08e370588c2a6b1c7b2522bbf6374b48d12cadab",
"size": "4812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/eventlet/tests/convenience_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import datetime
from maka.data.Field import Date
from FieldTests import FieldTests, fieldTestClass
@fieldTestClass
class DateFieldTests(FieldTests):
fieldClass = Date
validValue = datetime.date(2013, 7, 29)
invalidValue = ''
defaultTypeName = 'date'
| {
"content_hash": "31e5ce18f95c9afbdb5e5d338bad0e51",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 18.166666666666668,
"alnum_prop": 0.7064220183486238,
"repo_name": "HaroldMills/Maka",
"id": "4c468749d61bcb4bd09e4f2548c997375c6917a5",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/DateFieldTests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222632"
}
],
"symlink_target": ""
} |
"""
Utilities for PDF rendering from HTML using WeasyPrint.
Note that you need to add https://pypi.org/project/weasyprint/ to your dependencies
if you want to make use of HTML-to-PDF rendering. This is not included by default as
it's quite heavy and requires OS-level dependencies.
This module exposes the public function :func:`render_to_pdf` which renders a template
with a context into a PDF document (bytes output). You can use "external" stylesheets
in these templates, and they will be resolved through django's staticfiles machinery
by the custom :class:`UrlFetcher`.
"""
import logging
import mimetypes
from io import BytesIO
from pathlib import PurePosixPath
from typing import Optional, Tuple
from urllib.parse import ParseResult, urljoin, urlparse
from django.conf import settings
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage, default_storage
from django.template.loader import render_to_string
import weasyprint
logger = logging.getLogger(__name__)
__all__ = ["render_to_pdf"]
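# Example usage (illustrative; the template name and context are assumptions):
#
#   from clarityv2.utils.pdf import render_to_pdf
#
#   html, pdf_bytes = render_to_pdf("invoices/invoice.html", {"invoice": invoice})
#   with open("invoice.pdf", "wb") as f:
#       f.write(pdf_bytes)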
def get_base_url() -> str:
"""
Get the base URL where the project is served.
You should tweak this after starting the project with a solution fitting
your project, as we cannot guess your set-up or where your project is hosted.
The base URL is required to be able to download/resolve custom fonts and/or any
image URLs included in the document to render.
"""
# some hints:
# * define a setting `BASE_URL` in your settings matching the canonical domain where
# your project is deployed
# * if you only need to serve static assets (=no user-uploaded content), you can use
# a dummy URL like "https://{{ project_name|lower }}.dev"
return settings.BASE_URL
class UrlFetcher:
"""
URL fetcher that skips the network for /static/* files.
"""
def __init__(self):
self.static_url = self._get_fully_qualified_url(settings.STATIC_URL)
is_static_local_storage = issubclass(
staticfiles_storage.__class__, FileSystemStorage
)
self.media_url = self._get_fully_qualified_url(settings.MEDIA_URL)
is_media_local_storage = issubclass(
default_storage.__class__, FileSystemStorage
)
self.candidates = (
(self.static_url, staticfiles_storage, is_static_local_storage),
(self.media_url, default_storage, is_media_local_storage),
)
@staticmethod
def _get_fully_qualified_url(setting: str):
fully_qualified_url = setting
if not urlparse(setting).netloc:
fully_qualified_url = urljoin(get_base_url(), setting)
return urlparse(fully_qualified_url)
def __call__(self, url: str) -> dict:
orig_url = url
parsed_url = urlparse(url)
candidate = self.get_match_candidate(parsed_url)
if candidate is not None:
base_url, storage = candidate
path = PurePosixPath(parsed_url.path).relative_to(base_url.path)
absolute_path = None
if storage.exists(path):
absolute_path = storage.path(path)
elif settings.DEBUG and storage is staticfiles_storage:
# use finders so that it works in dev too, we already check that it's
# using filesystem storage earlier
absolute_path = finders.find(str(path))
if absolute_path is None:
logger.error("Could not resolve path '%s'", path)
return weasyprint.default_url_fetcher(orig_url)
content_type, encoding = mimetypes.guess_type(absolute_path)
result = dict(
mime_type=content_type,
encoding=encoding,
redirected_url=orig_url,
filename=path.parts[-1],
)
with open(absolute_path, "rb") as f:
result["file_obj"] = BytesIO(f.read())
return result
return weasyprint.default_url_fetcher(orig_url)
def get_match_candidate(
self, url: ParseResult
) -> Optional[Tuple[ParseResult, FileSystemStorage]]:
for parsed_base_url, storage, is_local_storage in self.candidates:
if not is_local_storage:
continue
same_base = (parsed_base_url.scheme, parsed_base_url.netloc) == (
url.scheme,
url.netloc,
)
if not same_base:
continue
if not url.path.startswith(parsed_base_url.path):
continue
return (parsed_base_url, storage)
return None
def render_to_pdf(template_name: str, context: dict) -> Tuple[str, bytes]:
"""
Render a (HTML) template to PDF with the given context.
"""
rendered_html = render_to_string(template_name, context=context)
html_object = weasyprint.HTML(
string=rendered_html,
url_fetcher=UrlFetcher(),
base_url=get_base_url(),
)
pdf: bytes = html_object.write_pdf()
    return rendered_html, pdf
| {
"content_hash": "14d43ee12efba3351e9793759cbe06b3",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 88,
"avg_line_length": 36.24647887323944,
"alnum_prop": 0.6438702156596076,
"repo_name": "Clarity-89/clarityv2",
"id": "a4c7cbccf2dde9952149c60d732c4c5475b1600e",
"size": "5147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clarityv2/utils/pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272"
},
{
"name": "Dockerfile",
"bytes": "2230"
},
{
"name": "HTML",
"bytes": "46778"
},
{
"name": "JavaScript",
"bytes": "5460"
},
{
"name": "Python",
"bytes": "131598"
},
{
"name": "SCSS",
"bytes": "18878"
},
{
"name": "Shell",
"bytes": "2008"
}
],
"symlink_target": ""
} |
import os
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django-core.core"
else:
return "django-core.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep -v '^@@|^[-+]#|^..POT-Creation' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
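    # The egrep above drops hunk headers, comment lines and POT-Creation dates;
    # the 4 subtracted here accounts for the remaining "diff --git", "index",
    # "---" and "+++" header lines.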
num_changes = int(output.strip()) - 4
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
contrib_dirs = _get_locale_dirs(include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating main en catalog")
call_command('makemessages', locale='en')
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
# Contrib catalogs
for name, dir_ in contrib_dirs:
        if resources and name not in resources:
continue
os.chdir(os.path.join(dir_, '..'))
print("Updating en catalog in %s" % dir_)
if name.endswith('-js'):
call_command('makemessages', locale='en', domain='djangojs')
else:
call_command('makemessages', locale='en')
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
    If resources is provided, it should be a list of translation resources to
limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs()
for name, dir_ in locale_dirs:
        if resources and name not in resources:
continue
print("\nShowing translations stats for '%s':" % name)
langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
for lang in langs:
            if languages and lang not in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
# msgfmt output stats on stderr
print("%s: %s" % (lang, errors.strip()))
else:
print("Errors happened when checking %s translation for %s:\n%s" % (
lang, name, errors))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs()
errors = []
for name, dir_ in locale_dirs:
        if resources and name not in resources:
continue
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f' % {'res': _tx_resource_for_name(name)}, shell=True)
languages = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in languages:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
call('msgcat -o %s %s' % (po_path, po_path), shell=True)
res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
if res != 0:
errors.append((name, lang))
if errors:
print("\nWARNING: Errors have occurred in following cases:")
for resource, lang in errors:
print("\tResource %s for language %s" % (resource, lang))
exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = OptionParser(usage="usage: %prog [options] cmd")
parser.add_option("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_option("-l", "--languages", action='append',
help="limit operation to the specified languages")
options, args = parser.parse_args()
if not args:
parser.print_usage()
exit(1)
if args[0] in RUNABLE_SCRIPTS:
eval(args[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
| {
"content_hash": "27bfc7822a9809f430f863809c0bc0d6",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 98,
"avg_line_length": 38.12337662337662,
"alnum_prop": 0.5835462442514052,
"repo_name": "ericholscher/django",
"id": "5460eef25e2e8c7209019d2363c635e7546b522e",
"size": "6618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/manage_translations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102377"
},
{
"name": "Python",
"bytes": "9011891"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from .alignment import Alignment
from .tasks import TreeCollectionTaskInterface
from .utils.decorators import lazyprop
from .utils import concatenate, fileIO
from Bio import AlignIO
__author__ = 'kgori'
class Concatenation(object):
"""docstring for Concatenation"""
def __init__(self, collection, indices):
super(Concatenation, self).__init__()
        if any((x >= len(collection)) for x in indices):
raise ValueError('Index out of bounds in {}'.format(indices))
        if any((x < 0) for x in indices):
raise ValueError('Index out of bounds in {}'.format(indices))
if any((not isinstance(x, int)) for x in indices):
raise ValueError('Integers only in indices, please: {}'
.format(indices))
self.collection = collection
self.indices = sorted(indices)
def __len__(self):
return len(self.indices)
@lazyprop
def distances(self):
return [self.collection.distances[i] for i in self.indices]
@lazyprop
def variances(self):
return [self.collection.variances[i] for i in self.indices]
@lazyprop
def frequencies(self):
return [self.collection.frequencies[i] for i in self.indices]
@lazyprop
def alphas(self):
return [self.collection.alphas[i] for i in self.indices]
@lazyprop
def datatypes(self):
return [self.collection.datatypes[i] for i in self.indices]
@lazyprop
def alignment(self):
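        # Concatenate the per-locus BioPython alignments into one MSA, then
        # round-trip it through a temporary FASTA file to build an Alignment.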
msas = [self.collection[i].to_biopython_msa() for i in self.indices]
conc = concatenate(msas)
        for seq in conc:
            seq.description = ''
with fileIO.TempFile() as tmp:
AlignIO.write(conc, tmp, 'fasta')
al = Alignment(tmp, 'fasta', True)
return al
@lazyprop
def names(self):
return [self.collection.names[i] for i in self.indices]
@lazyprop
def lengths(self):
return [self.collection.lengths[i] for i in self.indices]
@lazyprop
def headers(self):
return [self.collection.headers[i] for i in self.indices]
@lazyprop
def coverage(self):
total = self.collection.num_species()
return [self.collection.lengths[i] / total for i in self.indices]
@lazyprop
def trees(self):
return [self.collection.trees[i] for i in self.indices]
@lazyprop
def mrp_tree(self):
        trees = [tree.newick if hasattr(tree, 'newick') else tree for tree in self.trees]
return Alignment().get_mrp_supertree(trees)
def get_tree_collection_strings(self, scale=1, guide_tree=None):
""" Function to get input strings for tree_collection
tree_collection needs distvar, genome_map and labels -
these are returned in the order above
"""
records = [self.collection[i] for i in self.indices]
return TreeCollectionTaskInterface().scrape_args(records)
def qfile(self, models=None, default_dna='DNA', default_protein='LG', sep_codon_pos=False,
ml_freqs=False, emp_freqs=False, per_locus=True):
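        # Build partition definitions of the form "MODEL, name = start-end"
        # (RAxML-style q file), optionally splitting DNA loci into the three
        # codon positions with a "/3" stride.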
from_ = 1
to_ = 0
qs = list()
if models is None:
if ml_freqs:
default_dna += 'X'
default_protein += 'X'
if emp_freqs and not ml_freqs:
default_protein += 'F'
default_models = dict(dna=default_dna, protein=default_protein)
models = [default_models[m] for m in self.datatypes]
if per_locus:
for (length, name, datatype, model) in zip(self.lengths, self.names,
self.datatypes, models):
to_ += length
if datatype == 'dna' and sep_codon_pos:
qs.append('{}, {} = {}-{}/3'.format(model, name, from_,
to_))
qs.append('{}, {} = {}-{}/3'.format(model, name, from_ + 1,
to_))
qs.append('{}, {} = {}-{}/3'.format(model, name, from_ + 2,
to_))
else:
qs.append('{}, {} = {}-{}'.format(model, name, from_,
to_))
from_ += length
return '\n'.join(qs)
else:
total_length = sum(self.lengths)
            model = models[0]
            if self.datatypes[0] == 'dna' and sep_codon_pos:
return '{}, all = 1-{}/3'.format(model, total_length)
else:
return '{}, all = 1-{}'.format(model, total_length)
def paml_partitions(self):
return 'G {} {}'.format(len(self.lengths),
' '.join(str(x) for x in self.lengths))
| {
"content_hash": "d961b01f853a7c92a2e4c0353ddbc831",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 94,
"avg_line_length": 36.85606060606061,
"alnum_prop": 0.5424460431654676,
"repo_name": "kgori/treeCl",
"id": "0581213f2638f5fa5e7f4ed9bdaed2d82139afb5",
"size": "4865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treeCl/concatenation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "41097"
},
{
"name": "C++",
"bytes": "1731778"
},
{
"name": "CMake",
"bytes": "2686"
},
{
"name": "Jupyter Notebook",
"bytes": "76541"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Python",
"bytes": "428585"
},
{
"name": "Shell",
"bytes": "468"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import InventoryList
urlpatterns = patterns('',
url(r'^inventory/$', InventoryList.as_view()),
) | {
"content_hash": "d4794bc1fee99c9e7ddf2d0da050cadf",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 50,
"avg_line_length": 26,
"alnum_prop": 0.7307692307692307,
"repo_name": "ateoto/django-boulange",
"id": "75ccd139f52c16abac7ac459000e0afc68db9ef8",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boulange/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11493"
}
],
"symlink_target": ""
} |
import unittest
from quickbooks.objects.bill import Bill, BillLine, AccountBasedExpenseLineDetail
class AccountBasedExpenseLineDetailTests(unittest.TestCase):
def test_unicode(self):
acct_detail = AccountBasedExpenseLineDetail()
acct_detail.BillableStatus = "test"
self.assertEquals(unicode(acct_detail), "test")
class BillTests(unittest.TestCase):
def test_unicode(self):
bill = Bill()
bill.Balance = 1000
self.assertEquals(unicode(bill), "1000")
class BillLineTests(unittest.TestCase):
def test_unicode(self):
bill_line = BillLine()
bill_line.Amount = 1000
self.assertEquals(unicode(bill_line), "1000")
| {
"content_hash": "3fc973c29e0311e1e76781d13e8f508a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.6952789699570815,
"repo_name": "ferdiaz/python-quickbooks",
"id": "483a93af5c2f18123ea4dfa4e3ec6dcac54bc0f5",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_bill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "92199"
}
],
"symlink_target": ""
} |
from collections import Sequence
import unittest
from requests.structures import CaseInsensitiveDict
from docido_sdk.toolbox.edsl import kwargsql
and_ = kwargsql.and_
or_ = kwargsql.or_
xor = kwargsql.xor
class TestKwargSQL(unittest.TestCase):
d = {
's': 's_value',
'i': 3,
'nested': {
'val': 'nested-value',
'another_key': 42,
},
'array': [4, 5, 6],
'exc': Exception("Error: a comprehensive message"),
'nestedl': [
dict(foo=1),
dict(foo=2, bar=3),
],
}
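    # The tests below exercise Django-ORM style double-underscore paths, e.g.
    # and_(d, nested__val='nested-value') or or_(d, exc__message__istartswith='error: ').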
def test_sequence_get(self):
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__any__foo')),
[1, 2])
# nested elements that produce an error are discarded from the result.
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__any__bar')),
[3])
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__any__unknown')),
[])
        # It makes no difference whether we call `any` or `each` in `kwargsql.get`,
        # because the distinction only matters when there is an operation to
        # perform on the result data.
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__each__foo')),
[1, 2])
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__each__bar')),
[3])
self.assertEquals(list(kwargsql.get(self.d, 'nestedl__any__unknown')),
[])
def test_sequence_logical(self):
# at least one element of `nestedl` has the `foo` attribute
self.assertTrue(and_(self.d, nestedl__any__contains='foo'))
# at least one element of `nestedl` has the `bar` attribute
self.assertTrue(and_(self.d, nestedl__any__contains='bar'))
# all elements of `nestedl` have the 'foo' attribute
self.assertTrue(and_(self.d, nestedl__each__contains='foo'))
# not all elements of `nestedl` have the 'bar' attribute
self.assertFalse(and_(self.d, nestedl__each__contains='bar'))
# can run operation (here equality) on an empty sequence
self.assertFalse(and_(self.d, nestedl__each__unknown='bar'))
# can run operation (here equality) on an empty sequence
self.assertFalse(or_(self.d, nestedl__each__unknown='bar'))
def test_operations(self):
self.assertFalse(kwargsql.OPERATIONS['ne']('a', u'a'))
self.assertTrue(kwargsql.OPERATIONS['ne']('a', 42))
self.assertFalse(kwargsql.OPERATIONS['lt'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['lt'](41, 42))
self.assertTrue(kwargsql.OPERATIONS['lte'](42, 42))
self.assertFalse(kwargsql.OPERATIONS['gt'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['gt'](42, 41))
self.assertTrue(kwargsql.OPERATIONS['gte'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['in'](1, [2, 3, 1, 4]))
self.assertTrue(kwargsql.OPERATIONS['nin'](0, [1, 2, 3]))
self.assertTrue(kwargsql.OPERATIONS['size']([1, 2, 3], 3))
self.assertTrue(kwargsql.OPERATIONS['iexact']('foo', u'Foo'))
self.assertTrue(kwargsql.OPERATIONS['contains']('abcde', 'bcd'))
self.assertTrue(kwargsql.OPERATIONS['icontains']('abcd', 'bCD'))
self.assertTrue(kwargsql.OPERATIONS['startswith']('abcd', 'abc'))
self.assertTrue(kwargsql.OPERATIONS['istartswith']('abcd', 'aBc'))
self.assertTrue(kwargsql.OPERATIONS['endswith']('abcd', 'bcd'))
self.assertTrue(kwargsql.OPERATIONS['iendswith']('abcd', 'BcD'))
self.assertTrue(kwargsql.OPERATIONS['isinstance']('abcd', basestring))
self.assertTrue(kwargsql.OPERATIONS['issubclass'](str, basestring))
def test_seqexp(self):
d = self.d
self.assertTrue(and_(d, s='s_value', i=3))
self.assertFalse(and_(d, s='s_value', i=1))
self.assertFalse(or_(d, s='not', i='not'))
self.assertTrue(or_(d, s='s_value', i='not'))
self.assertTrue(or_(d, s='not', i=3))
self.assertTrue(or_(d, s='s_value', foo_i=3))
self.assertTrue(xor(d, foo_i=42, s='s_value'))
self.assertFalse(xor(d, foo_i=42, s='unknown'))
def test_simple_op(self):
d = self.d
self.assertTrue(and_(d, nested__size=2))
def test_simple_trailing__(self):
self.assertTrue(and_(self.d, s__='s_value'))
def test_not(self):
d = self.d
self.assertFalse(and_(d, s__not='s_value'))
def test_nested(self):
d = self.d
self.assertTrue(and_(d, nested__val='nested-value'))
self.assertTrue(and_(d, exc__message__istartswith='error: '))
def test_arrays(self):
self.assertTrue(and_(self.d, array__1=5))
def test_invalid(self):
with self.assertRaises(Exception):
and_(self.d, __=42)
def test_exist_operation(self):
self.assertFalse(and_(self.d, nested__unknown__exists=1))
self.assertFalse(and_(self.d, exc__unknown__exists=1))
self.assertTrue(and_(self.d, nested__unknown__exists=False))
self.assertTrue(and_(self.d, exc__unknown__exists=False))
def test_get(self):
self.assertEqual(kwargsql.get(self.d, 'nested__val'), 'nested-value')
def test_abc_mappings_navigation(self):
d = dict(foo=CaseInsensitiveDict(bar=CaseInsensitiveDict(pika=42)))
self.assertEqual(kwargsql.get(d, 'foo__bAr__PiKA'), 42)
def test_abc_sequence_select(self):
class DumbSequence(Sequence):
def __len__(self):
return 2
def __getitem__(self, key):
return key + 1
self.assertEqual(kwargsql.get(DumbSequence(), '1'), 2)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f42c4aaf75d368f4600d78888395f236",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 41.114285714285714,
"alnum_prop": 0.5927727588603197,
"repo_name": "cogniteev/docido-python-sdk",
"id": "08c1036aae115d1504c69d5366e3275b3d95fe27",
"size": "5756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_kwargsql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "223570"
}
],
"symlink_target": ""
} |
import types
import json
import sys
from datetime import datetime
import odc
# Logging Levels
Trace = 0
Debug = 1
Info = 2
Warn = 3
Error = 4
Critical = 5
# Exact string values for Event parameters which are passed as strings
# EventTypes, ConnectState,Binary,Analog,Counter,FrozenCounter,BinaryOutputStatus,AnalogOutputStatus,ControlRelayOutputBlock and others...
# QualityFlags, ONLINE,RESTART,COMM_LOST,REMOTE_FORCED,LOCAL_FORCE,OVERRANGE,REFERENCE_ERR,ROLLOVER,DISCONTINUITY,CHATTER_FILTER
# ConnectState, PORT_UP,CONNECTED,DISCONNECTED,PORT_DOWN
# ControlCode, NUL,NUL_CANCEL,PULSE_ON,PULSE_ON_CANCEL,PULSE_OFF,PULSE_OFF_CANCEL,LATCH_ON,LATCH_ON_CANCEL,LATCH_OFF,LATCH_OFF_CANCEL,
# CLOSE_PULSE_ON,CLOSE_PULSE_ON_CANCEL,TRIP_PULSE_ON,TRIP_PULSE_ON_CANCEL,UNDEFINED
class SimPortClass:
    ''' Our class to handle an ODC Port. We must have __init__, ProcessJSONConfig, Enable, Disable, EventHandler, TimerHandler and
RestRequestHandler defined, as they will be called by our c/c++ code.
    ODC publishes some functions to this Module (when run); they are part of the odc module imported above.
We currently have odc.log, odc.SetTimer and odc.PublishEvent.
'''
# Worker Methods. They need to be high in the code so they are available in the code below. No forward declaration in Python
def LogTrace(self, message ):
odc.log(self.guid, Trace, message )
def LogError(self, message ):
odc.log(self.guid,Error, message )
def LogDebug(self, message ):
odc.log(self.guid, Debug, message )
def LogInfo(self, message ):
odc.log(self.guid,Info, message )
def LogWarn(self, message ):
odc.log(self.guid, Warn, message )
def LogCritical(self, message ):
odc.log(self.guid,Critical, message )
# Required Method
def __init__(self, odcportguid, objectname):
self.objectname = objectname # Documentation/error use only.
self.guid = odcportguid # So that when we call an odc method, ODC can work out which pyport to hand it too.
        self.enabled = False
self.i = 2
self.ConfigDict = {} # Config Dictionary
self.LogDebug("*********** SimPortClass Init Called - File Version 1.002 - {}".format(objectname))
self.processedevents = 0
return
# Required Method
def Config(self, MainJSON, OverrideJSON):
""" The JSON values are passed as strings (stripped of comments), which we then load into a dictionary for processing
        Note that this does not handle Inherits JSON entries correctly (Inherits is effectively an Include file entry)"""
#self.LogDebug("Passed Main JSON Config information - Len {} , {}".format(len(MainJSON),MainJSON))
#self.LogDebug("Passed Override JSON Config information - Len {} , {}".format(len(OverrideJSON), OverrideJSON))
# Load JSON into Dicts
Override = {}
try:
if len(MainJSON) != 0:
self.ConfigDict = json.loads(MainJSON)
if len(OverrideJSON) != 0:
Override = json.loads(OverrideJSON)
except:
self.LogError("Exception on parsing JSON Config data - {}".format(sys.exc_info()[0]))
return
self.LogDebug("JSON Config strings Parsed")
# Now use the override config settings to adjust or add to the MainConfig. Only root json values can be adjusted.
# So you cannot change a single value in a Binary point definition without rewriting the whole "Binaries" json key.
self.ConfigDict.update(Override) # Merges with Override doing just that - no recursion into sub dictionaries
self.LogDebug("Combined (Merged) JSON Config {}".format(json.dumps(self.ConfigDict)))
# Now extract what is needed for this instance, or just reference the ConfigDict when needed.
return
# Required Method
def Operational(self):
""" This is called from ODC once ODC is ready for us to be fully operational - normally after Build is complete"""
self.LogDebug("Port Operational - {}".format(datetime.now().isoformat(" ")))
odc.SetTimer(self.guid, 1, 250) #250 msec
return
# Required Method
def Enable(self):
self.LogTrace("Enabled - {}".format(datetime.now().isoformat(" ")))
        self.enabled = True
return
# Required Method
def Disable(self):
self.LogDebug("Disabled - {}".format(datetime.now().isoformat(" ")))
self.enabled = False
return
# Needs to return True or False, which will be translated into CommandStatus::SUCCESS or CommandStatus::UNDEFINED
# EventType (string) Index (int), Time (msSinceEpoch), Quality (string) Payload (string) Sender (string)
# There is no callback available, the ODC code expects this method to return without delay.
def EventHandler(self,EventType, Index, Time, Quality, Payload, Sender):
self.LogTrace("EventHander: {}, {}, {} {} - {}".format(self.guid,Sender,Index,EventType,Payload))
self.processedevents += 1
if (EventType == "Binary"):
self.LogDebug("Event is a Binary")
if ("ONLINE" not in Quality):
self.LogDebug("Event Quality not ONLINE")
odc.PublishEvent(self.guid,EventType,Index,Quality,Payload) # Echoing Event for testing. Sender, Time auto created in ODC
return True
# Will be called at the appropriate time by the ASIO handler system. Will be passed an id for the timeout,
# so you can have multiple timers running.
def TimerHandler(self,TimerId):
self.LogTrace("TimerHander: ID {}, {}".format(TimerId, self.guid))
if (TimerId == 1):
#currentqueuesize = odc.GetEventQueueSize(self.guid)
#self.LogDebug("TimerHander: Event Queue Size {}".format(currentqueuesize))
# Get Events from the queue and process them
while (True):
JsonEvent, empty = odc.GetNextEvent(self.guid)
if (empty == True):
break
                self.processedevents += 1 # Python is single threaded, so no concurrency issues (unless specifically enabled for multi)
odc.SetTimer(self.guid, 1, 250) #250 msec - timer 1 restarts itself!
return
# The Rest response interface - the following method will be called whenever the restful interface (a single interface for all PythonPorts) gets
# called. It will be decoded sufficiently so that it is passed to the correct PythonPort (us)
# To make these calls in Python (our test scripts) we can use the library below.
# https://2.python-requests.org//en/master/
#
# We return the response that we want sent back to the caller. This will be a JSON string. A null string would be an error.
def RestRequestHandler(self, url, content):
self.LogTrace("RestRequestHander: {}".format(url))
Response = {} # Empty Dict
if ("GET" in url):
Response["test"] = "GET"
Response["processedevents"] = self.processedevents
else:
Response["test"] = "POST"
# Just to make sure it gets called and the call succeeds.
currentqueuesize = odc.GetEventQueueSize(self.guid)
odc.SetTimer(self.guid, self.i, 1001-(self.i%1000)) # Set a timer to go off in a period less than a second
self.i = self.i + 1
self.LogTrace("RestRequestHander: Sent Set Timer Command {}".format(self.i))
return json.dumps(Response)
| {
"content_hash": "9e5584f618ca6178aa6bc0d6f353491f",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 148,
"avg_line_length": 46.83229813664596,
"alnum_prop": 0.6631299734748011,
"repo_name": "neilstephens/opendatacon",
"id": "b1f999cc3e012e4f3664e664ecb307dd1b303df9",
"size": "7540",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Code/Ports/PyPort/PythonCode/PyPortSim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "266"
},
{
"name": "C",
"bytes": "8532616"
},
{
"name": "C++",
"bytes": "2851876"
},
{
"name": "CMake",
"bytes": "71508"
},
{
"name": "CSS",
"bytes": "210"
},
{
"name": "HTML",
"bytes": "44780"
},
{
"name": "JavaScript",
"bytes": "15876"
},
{
"name": "Lua",
"bytes": "23202"
},
{
"name": "Perl",
"bytes": "8126"
},
{
"name": "Python",
"bytes": "175536"
},
{
"name": "Shell",
"bytes": "2283"
}
],
"symlink_target": ""
} |
import os.path
from glob import glob
from PIL import ImageFont
def available():
"""
Returns list of available font names.
"""
font_dir = os.path.dirname(__file__)
names = [os.path.basename(os.path.splitext(f)[0]) for f in glob(os.path.join(font_dir, '*.pil'))]
return sorted(names)
def load(name):
"""
Loads the font specified by name and returns it as an instance of
`PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_
class.
"""
try:
font_dir = os.path.dirname(__file__)
pil_file = os.path.join(font_dir, '{}.pil'.format(name))
return ImageFont.load(pil_file)
except FileNotFoundError:
raise Exception('Failed to load font "{}". '.format(name) +
'Check ev3dev.fonts.available() for the list of available fonts')
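# Usage sketch (names are illustrative, not guaranteed to exist on every image):
#   font = load('courB14')
#   # then pass it to PIL's ImageDraw, e.g. draw.text((0, 0), 'hello', font=font)
# Call available() first to see which .pil fonts are actually bundled.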
| {
"content_hash": "009b074475eb79169e13e377ef34740d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 101,
"avg_line_length": 31.74074074074074,
"alnum_prop": 0.6289381563593932,
"repo_name": "rhempel/ev3dev-lang-python",
"id": "95b480446a5f7fa2c23c88d6f0c7cc4e9d19be4b",
"size": "857",
"binary": false,
"copies": "2",
"ref": "refs/heads/ev3dev-stretch",
"path": "ev3dev2/fonts/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296032"
}
],
"symlink_target": ""
} |
"""
Parse DAT files blocks and traverse block chains.
"""
from binascii import hexlify
from struct import unpack_from
from DAT.Header import Header
class Block:
"""
A block making up a chunk of a Directory in a DAT file.
"""
def __init__(self, filename=None, offset=None, size=None, next_block_offset=None, data=None):
self.filename = filename
self.offset = offset
self.size = size
self.next_block_offset = next_block_offset
self.data = data
def parse(self, blob):
"""
Try to parse a block structure out of the given binary blob.
"""
self.data = unpack_from(str(len(blob[4:])) + "s", blob[4:])[0]
self.next_block_offset = unpack_from("I", blob)[0]
@classmethod
def from_blob(cls, blob):
"""
Return a new Block instance initialized with the result of parsing the
given binary blob.
"""
b = cls()
b.parse(blob)
b.size = len(blob)
return b
@classmethod
def from_file(cls, filename, offset):
"""
Return a new Block instance initialized with the result of parsing the
given file at the given offset.
"""
with open(filename, "rb") as fp:
h = Header.from_file(filename)
fp.seek(offset)
blob = fp.read(h.block_size)
b = cls.from_blob(blob)
b.filename = filename
b.offset = offset
return b
def __iter__(self):
return BlockIterator(self)
def __str__(self):
s = "{filename: " + str(self.filename)
s += ", offset: " + str(hex(self.offset))
s += ", size: " + str(hex(self.size))
s += ", next: " + str(hex(self.next_block_offset))
s += ", data: " + hexlify(self.data)
s += "}"
return s
class BlockIterator:
def __init__(self, first_block):
self.current_block = first_block
self.no_more_blocks = False
def __iter__(self):
return self
def next(self):
if self.no_more_blocks:
raise StopIteration()
else:
            b = self.current_block
            if b.next_block_offset == 0x0:
                # A zero offset marks the last block; don't read past it.
                self.no_more_blocks = True
            else:
                self.current_block = Block.from_file(b.filename, b.next_block_offset)
            return b
class BlockChain:
"""
The result of traversing a series of Block starting at the given Block.
The data held by a BlockChain can be parsed into a Directory.
"""
def __init__(self, start_block):
self.size = 0
self.data = ""
for block in iter(start_block):
self.size += block.size
self.data += block.data
def __str__(self):
s = "{size: " + str(self.size)
s += ", data: " + hexlify(self.data)
s += "}"
return s
| {
"content_hash": "4cc26cc62e551fd50e9ab0b29136de84",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 97,
"avg_line_length": 27.221238938053098,
"alnum_prop": 0.5406371911573472,
"repo_name": "ccressent/acnav",
"id": "18229d863da7bde8d4c8f24f937fae4f13f56a0a",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DAT/Block.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18749"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import openpnm as op
import numpy.testing as nt
class TransportTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[9, 9, 9])
self.net.add_model_collection(
op.models.collections.geometry.spheres_and_cylinders)
self.net.regenerate_models()
self.phase = op.phase.Air(network=self.net)
self.phase.add_model_collection(
op.models.collections.physics.basic)
self.phase.regenerate_models()
self.phase['pore.mole_fraction'] = 0
self.phase['throat.diffusive_conductance'] = 1.0
def test_undefined_elements(self):
net = op.network.Cubic(shape=[3, 3, 3])
phase = op.phase.Phase(network=net)
phase["throat.conductance"] = 1.0
alg = op.algorithms.Transport(network=net, phase=phase)
alg.settings._update({"quantity": "pore.concentration",
"conductance": "throat.conductance"})
def test_remove_boundary_conditions(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.set_value_BC(pores=self.net.pores('top'), values=1)
alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
assert np.sum(np.isfinite(alg['pore.bc.value'])) > 0
alg.set_value_BC(pores=self.net.pores('top'), mode='remove')
assert np.sum(np.isfinite(alg['pore.bc.value'])) > 0
alg.set_value_BC(pores=self.net.pores('bottom'), mode='remove')
assert np.sum(np.isfinite(alg['pore.bc.value'])) == 0
def test_generic_transport(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=self.net.pores('top'), values=1)
alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
alg.run()
from openpnm.algorithms._solution import SteadyStateSolution
quantity = alg.settings['quantity']
# Ensure solution object is attached to the algorithm
assert isinstance(alg.soln[quantity], SteadyStateSolution)
def test_ill_defined_topology(self):
net = op.network.Cubic(shape=[5, 1, 1])
op.topotools.trim(net, pores=[2])
phase = op.phase.Phase(network=net)
phase["throat.diffusive_conductance"] = 1.0
alg = op.algorithms.Transport(network=net, phase=phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=net.pores('left'), values=1)
with pytest.raises(Exception):
alg.run()
def test_linear_system_with_nans_or_infs(self):
net = op.network.Cubic(shape=[5, 1, 1])
phase = op.phase.Phase(network=net)
phase["throat.diffusive_conductance"] = 1.0
alg = op.algorithms.Transport(network=net, phase=phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=net.pores('left'), values=1)
# A contains nan
with pytest.raises(Exception):
phase["throat.diffusive_conductance"][1] = np.inf
alg.run()
phase["throat.diffusive_conductance"] = 1.0
# A contains inf
with pytest.raises(Exception):
phase["throat.diffusive_conductance"][1] = np.inf
alg.run()
phase["throat.diffusive_conductance"] = 1.0
# b contains nan
with pytest.raises(Exception):
phase["throat.diffusive_conductance"][1] = np.inf
alg.run()
phase["throat.diffusive_conductance"] = 1.0
# b contains inf
with pytest.raises(Exception):
phase["throat.diffusive_conductance"][1] = np.inf
alg.run()
def test_ill_defined_settings(self):
net = op.network.Cubic(shape=[5, 1, 1])
phase = op.phase.Phase(network=net)
phase["throat.diffusive_conductance"] = 1.0
alg = op.algorithms.Transport(network=net, phase=phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=net.pores('left'), values=1)
# conductance is not defined
with pytest.raises(Exception):
alg.settings['conductance'] = None
alg.run()
alg.settings['conductance'] = 'throat.diffusive_conductance'
# quantity is not defined
with pytest.raises(Exception):
alg.settings['quantity'] = None
alg.run()
alg.settings['quantity'] = 'pore.mole_fraction'
# phase is not defined
with pytest.raises(Exception):
alg.settings['phase'] = None
alg.run()
def test_two_value_conditions(self):
alg = op.algorithms.Transport(network=self.net,
phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=self.net.pores('top'), values=1)
alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
alg.run()
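        # With unit conductances on a 9x9x9 cubic network and value BCs of 1 and 0,
        # the steady-state profile is linear, so the unique pore values step evenly
        # from 0 to 1 in increments of 1/8.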
x = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]
y = np.unique(np.around(alg['pore.mole_fraction'], decimals=3))
assert np.all(x == y)
def test_one_value_one_rate(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_rate_BC(pores=self.net.pores('bottom'), rates=1)
alg.set_value_BC(pores=self.net.pores('top'), values=0)
alg.run()
x = [0., 1., 2., 3., 4., 5., 6., 7., 8.]
y = np.unique(np.around(alg['pore.mole_fraction'], decimals=3))
assert np.all(x == y)
def test_set_value_bc_where_rate_is_already_set_mode_merge(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_rate_BC(pores=[0, 1], rates=1, mode='add')
with pytest.raises(Exception):
alg.set_value_BC(pores=[1, 2], values=0, mode='add')
assert np.isfinite(alg['pore.bc.rate']).sum() == 2
assert np.isfinite(alg['pore.bc.value']).sum() == 0
alg.set_rate_BC(pores=[0, 1], rates=1, mode='overwrite')
alg.set_value_BC(pores=[1, 2], values=0, mode='overwrite')
assert np.isfinite(alg['pore.bc.rate']).sum() == 1
assert np.isfinite(alg['pore.bc.value']).sum() == 2
def test_cache(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_rate_BC(pores=self.net.pores('bottom'), rates=1)
alg.set_value_BC(pores=self.net.pores('top'), values=0)
alg.settings["cache"] = True
alg.run()
x_before = alg["pore.mole_fraction"].mean()
self.phase["throat.diffusive_conductance"][1] = 50.0
alg.run()
x_after = alg["pore.mole_fraction"].mean()
# When cache is True, A is not recomputed, hence x == y
assert x_before == x_after
alg.settings["cache"] = False
alg.run()
x_after = alg["pore.mole_fraction"].mean()
# When cache is False, A must be recomputed, hence x!= y
assert x_before != x_after
# Revert back changes to objects
self.setup_class()
def test_rate_single_pore(self):
alg = op.algorithms.ReactiveTransport(network=self.net,
phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
pores = self.net.pores("left")
alg.set_rate_BC(pores=pores, rates=1.235*np.ones(pores.size))
alg.set_value_BC(pores=self.net.pores("right"), values=0.0)
alg.run()
rate = alg.rate(pores=self.net.pores("right"))[0]
assert np.isclose(rate, -1.235*self.net.pores("right").size)
# Net rate must always be zero at steady state conditions
assert np.isclose(alg.rate(pores=self.net.Ps), 0.0)
def test_rate_multiple_pores(self):
alg = op.algorithms.Transport(network=self.net, phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_rate_BC(pores=[0, 1, 2, 3], rates=1.235)
alg.set_rate_BC(pores=[5, 6, 19, 35, 0], rates=3.455, mode='overwrite')
# Pore 0 is assigned two rate BCs, only the most recent will be kept
alg.set_value_BC(pores=[50, 51, 52, 53], values=0.0)
alg.run()
rate = alg.rate(pores=[50, 51, 52, 53])[0]
# 3 and 5 are number of pores in each rate BC
assert np.isclose(rate, -(1.235*3 + 3.455*5))
# Net rate must always be zero at steady state conditions
assert np.isclose(alg.rate(pores=self.net.Ps), 0.0)
def test_rate_multiple_values(self):
alg = op.algorithms.Transport(network=self.net,
phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_rate_BC(pores=[0, 1, 2, 3], rates=[0, 3.5, 0.4, -12])
alg.set_value_BC(pores=[50, 51, 52, 53], values=0.0)
alg.run()
rate_individual = alg.rate(pores=[0, 1, 2, 3], mode='single')
rate_net = alg.rate(pores=[0, 1, 2, 3], mode='group')[0]
nt.assert_allclose(rate_individual, [0, 3.5, 0.4, -12], atol=1e-10)
nt.assert_allclose(rate_net, sum([0, 3.5, 0.4, -12]))
# def test_rate_Nt_by_2_conductance(self):
# net = op.network.Cubic(shape=[1, 6, 1])
# net.add_model_collection(
# op.models.collections.geometry.spheres_and_cylinders)
# net.regenerate_models()
# air = op.phase.Air(network=net)
# water = op.phase.Water(network=net)
# m = op.contrib.MultiPhase(network=net, phases=[air, water])
# m.set_occupancy(phase=air, pores=[0, 1, 2])
# m.set_occupancy(phase=water, pores=[3, 4, 5])
# const = op.models.misc.constant
# K_water_air = 0.5
# m.set_binary_partition_coef(phases=[water, air], model=const, value=K_water_air)
# m._set_automatic_throat_occupancy()
# alg = op.algorithms.Transport(network=net, phase=m)
# alg.settings['conductance'] = 'throat.diffusive_conductance'
# alg.settings['quantity'] = 'pore.mole_fraction'
# alg.set_rate_BC(pores=0, values=1.235)
# alg.set_value_BC(pores=5, values=0.0)
# alg.run()
# rate = alg.rate(pores=5)[0]
# assert np.isclose(rate, -1.235)
# # Rate at air-water interface throat (#2) must match imposed rate
# rate = alg.rate(throats=2)[0]
# assert np.isclose(rate, 1.235)
# # Rate at interface pores (#2 @ air-side, #3 @ water-side) must be 0
# rate_air_side = alg.rate(pores=2)[0]
# rate_water_side = alg.rate(pores=3)[0]
# assert np.isclose(rate_air_side, 0.0)
# assert np.isclose(rate_water_side, 0.0)
# # Net rate must always be zero at steady state conditions
# assert np.isclose(alg.rate(pores=net.Ps), 0.0)
# def test_reset_settings_and_data(self):
# alg = op.algorithms.Transport(network=self.net,
# phase=self.phase)
# alg.settings['conductance'] = 'throat.diffusive_conductance'
# alg.settings['quantity'] = 'pore.mole_fraction'
# alg.set_rate_BC(pores=self.net.pores('bottom'), values=1)
# alg.set_value_BC(pores=self.net.pores('top'), values=0)
# alg.run()
# assert ~np.all(np.isnan(alg['pore.bc.value']))
# assert ~np.all(np.isnan(alg['pore.bc.rate']))
# assert 'pore.mole_fraction' in alg.keys()
# alg.reset(bcs=True, results=False)
# assert np.all(np.isnan(alg['pore.bc.value']))
# assert np.all(np.isnan(alg['pore.bc.rate']))
# assert 'pore.mole_fraction' in alg.keys()
# alg.reset(bcs=True, results=True)
# assert 'pore.mole_fraction' not in alg.keys()
# alg.set_rate_BC(pores=self.net.pores('bottom'), values=1)
# alg.set_value_BC(pores=self.net.pores('top'), values=0)
# alg.run()
# def test_reset_actual_results(self):
# alg = op.algorithms.Transport(network=self.net,
# phase=self.phase)
# alg.settings['conductance'] = 'throat.diffusive_conductance_temp'
# self.phase['throat.diffusive_conductance_temp'] = 1.0
# alg.settings['quantity'] = 'pore.concentration'
# alg.set_value_BC(pores=self.net.pores('bottom'), values=1)
# alg.set_value_BC(pores=self.net.pores('top'), values=0)
# alg.run()
# m1 = alg.rate(pores=self.net.pores('top'))
# m2 = -alg.rate(pores=self.net.pores('bottom'))
# # This should pass because the alg has only run once
# np.testing.assert_allclose(m1, m2)
# # Now adjust conductance values and re-run
# self.phase['throat.diffusive_conductance_temp'][[0, 1, 2]] *= 0.1
# alg.run()
# m1 = alg.rate(pores=self.net.pores('top'))
# m2 = -alg.rate(pores=self.net.pores('bottom'))
# # The mass won't balance, so the same test will fail
# with pytest.raises(AssertionError):
# np.testing.assert_allclose(m1, m2)
# # Now use reset method
# alg.reset()
# alg.run()
# m1 = alg.rate(pores=self.net.pores('top'))
# m2 = -alg.rate(pores=self.net.pores('bottom'))
# # Now this will pass again
# np.testing.assert_allclose(m1, m2)
# def test_validate_data_health(self):
# alg = op.algorithms.Transport(network=self.net,
# phase=self.phase)
# alg.settings['conductance'] = 'throat.diffusive_conductance'
# alg.settings['quantity'] = 'pore.concentration'
# alg.settings['cache'] = False
# alg.set_value_BC(pores=self.net.pores('top'), values=1)
# alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
# # Check if the method can catch NaNs in data
# self.phase['throat.diffusive_conductance'][0] = np.nan
# with pytest.raises(Exception):
# alg.run()
# mod = op.models.misc.from_neighbor_pores
# self.phase["pore.seed"] = np.nan
# self.phys.add_model(propname="throat.diffusive_conductance", model=mod,
# prop="pore.seed", ignore_nans=False)
# with pytest.raises(Exception):
# alg.run()
# self.phase["pore.seed"] = 1
# self.phase.regenerate_models(propnames="throat.diffusive_conductance")
# # Check if the method can catch unhealthy topology
# Ts = self.net.find_neighbor_throats(pores=0)
# op.topotools.trim(self.net, throats=Ts)
# with pytest.raises(Exception):
# alg.run()
# # Reset network back to original
# self.setup_class()
# def test_total_rate(self):
# alg = op.algorithms.GenericTransport(network=self.net,
# phase=self.phase)
# h = op.utils.check_network_health(self.net)
# op.topotools.trim(self.net, pores=h['disconnected_pores'])
# alg.settings['conductance'] = 'throat.diffusive_conductance'
# alg.settings['quantity'] = 'pore.mole_fraction'
# alg.set_rate_BC(pores=[0, 1, 2, 3], total_rate=1)
# alg.set_value_BC(pores=[50, 51, 52, 53], values=0.0)
# alg.run()
# rate_individual = alg.rate(pores=[0, 1, 2, 3], mode='single')
# nt.assert_allclose(rate_individual, [0.25, 0.25, 0.25, 0.25],
# atol=1e-10)
# # test exceptions that come from adding total_rate feature
# with pytest.raises(Exception):
# alg.set_rate_BC(pores=[0, 1, 2, 3],
# total_rate=[0.25, 0.25, 0.25, 0.25])
# with pytest.raises(Exception):
# alg.set_rate_BC(pores=[0, 1, 2, 3], rates=1, total_rate=1)
# def test_network_continuity(self):
# net = op.network.Cubic([5, 1, 1])
# op.topotools.trim(network=net, pores=[2])
# phase = op.phase.Phase(network=net)
# phase['throat.diffusive_conductance'] = 1.0
# alg = op.algorithms.FickianDiffusion(network=net, phase=phase)
# alg.set_value_BC(pores=0, values=1)
# with pytest.raises(Exception):
# alg.run()
# alg.set_value_BC(pores=3, values=0)
# alg.run()
def test_x0_is_nan(self):
alg = op.algorithms.Transport(network=self.net,
phase=self.phase)
alg.settings['conductance'] = 'throat.diffusive_conductance'
alg.settings['quantity'] = 'pore.mole_fraction'
alg.set_value_BC(pores=self.net.pores('top'), values=1)
alg.set_value_BC(pores=self.net.pores('bottom'), values=0)
x0 = np.zeros(alg.Np)
x0[5] = np.nan
with pytest.raises(Exception):
alg.run(x0=x0)
def test_get_source_list(self):
alg = op.algorithms.Transport(network=self.net,
phase=self.phase)
assert alg['pore.source'] == {}
with pytest.raises(KeyError):
alg['pore.source_blah'] == {}
def teardown_class(self):
ws = op.Workspace()
ws.clear()
if __name__ == '__main__':
t = TransportTest()
t.setup_class()
self = t
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
t.__getattribute__(item)()
| {
"content_hash": "e51763e7ce9eb55eff4d1da4689f93cc",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 90,
"avg_line_length": 46.88717948717949,
"alnum_prop": 0.5865689598600022,
"repo_name": "PMEAL/OpenPNM",
"id": "b51bfc13d1c407973e8ef6655f1e733ff63d3ba0",
"size": "18286",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/unit/algorithms/GenericTransportTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "1437146"
}
],
"symlink_target": ""
} |
"""A fork of flax.optim.MultiOptimizer that works with t5x.adafactor.
The original flax.optim.MultiOptimizer can be found at
https://github.com/google/flax/blob/main/flax/optim/base.py
"""
from typing import Sequence, Tuple
import flax
from flax import optim
from flax import traverse_util
from flax.core.frozen_dict import freeze
from flax.core.frozen_dict import unfreeze
import jax
import jax.numpy as jnp
from t5x import adafactor
# pylint: disable=protected-access
# local imports from optim:
OptimizerDef = optim.OptimizerDef
OptimizerState = optim.OptimizerState
Optimizer = optim.Optimizer
class _Marker:
"""Used to mark unoptimized leaves."""
def __init__(self):
self._indices = []
def standard_logical_factor_rules(rules=None):
"""Add prompt adafactor rules to your set of rules."""
if rules is None:
rules = adafactor.standard_logical_factor_rules()
rules = unfreeze(rules)
rules['prompt'] = adafactor.FactorDim.NONE
rules['tasks'] = adafactor.FactorDim.NONE
rules['prompt+embed'] = adafactor.FactorDim.NONE
rules['prompt_embed'] = adafactor.FactorDim.NONE
rules['batch'] = adafactor.FactorDim.BATCH
return freeze(rules)
def tree_of_paths(tree):
"""Converts a (frozen) nested dictionary into a (frozen) dict of paths."""
is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict)
flat_tree = traverse_util.flatten_dict(unfreeze(tree))
path_tree = traverse_util.unflatten_dict(
{k: '/'.join(k) for k in flat_tree.keys()})
if is_frozen:
path_tree = freeze(path_tree)
return path_tree
def subtree_from_traversal(traversal, tree):
"""Creates a (frozen) tree subset given a traversal."""
is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict)
flat_tree = {}
for path, leaf in zip(traversal.iterate(tree_of_paths(tree)),
traversal.iterate(tree)):
flat_tree[path] = leaf
new_tree = traverse_util.unflatten_dict(
{tuple(k.split('/')): v for k, v in flat_tree.items()})
if is_frozen:
new_tree = freeze(new_tree)
return new_tree
def update_subtree_of_traversal(traversal, tree, update):
"""Updates a (frozen) tree's subset given a traversal and update subtree."""
is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict)
flat_tree = traverse_util.flatten_dict(unfreeze(tree))
flat_tree = {'/'.join(k): v for k, v in flat_tree.items()}
for path, leaf in zip(traversal.iterate(tree_of_paths(update)),
traversal.iterate(update)):
flat_tree[path] = leaf
nested_d = traverse_util.unflatten_dict(
{tuple(k.split('/')): v for k, v in flat_tree.items()})
if is_frozen:
nested_d = freeze(nested_d)
return nested_d
class MultiOptimizer(OptimizerDef):
"""Optimize subsets of parameters.
Note:
This MultiOptimizer should only be used with the t5x version of adafactor.
  A MultiOptimizer is a subclass of :class:`OptimizerDef` and is useful for applying
separate optimizer algorithms to various subsets of the model parameters.
The example below creates two optimizers using
:class:`flax.traverse_util.ModelParamTraversal`:
  one to optimize ``kernel`` parameters and one to optimize ``bias`` parameters.
Note each optimizer is created with a different learning rate::
kernels = traverse_util.ModelParamTraversal(lambda path, _: 'kernel' in path)
biases = traverse_util.ModelParamTraversal(lambda path, _: 'bias' in path)
kernel_opt = optim.Momentum(learning_rate=0.01)
bias_opt = optim.Momentum(learning_rate=0.1)
opt_def = MultiOptimizer((kernels, kernel_opt), (biases, bias_opt))
optimizer = opt_def.create(model)
In order to train only a subset of the parameters, you can simply use a single
:class:`flax.traverse_util.ModelParamTraversal` instance.
If you want to update the learning rates of both optimizers online with
different learning rate schedules, you should update the learning rates when
applying the gradient. In the following example, the second optimizer is not
doing any optimization during the first 1000 steps::
hparams = optimizer.optimizer_def.hyper_params
new_optimizer = optimizer.apply_gradient(
grads,
hyper_params=[
hparams[0].replace(learning_rate=0.2),
hparams[1].replace(learning_rate=jnp.where(step < 1000, 0., lr)),
])
"""
def __init__(
self,
traversals_and_optimizers: Sequence[
Tuple[traverse_util.Traversal, OptimizerDef]]):
"""Create a new MultiOptimizer.
See docstring of :class:`MultiOptimizer` for more details.
Args:
traversals_and_optimizers: pairs of flax.traverse_util.Traversal and
`flax.optim.OptimizerDef` instances.
"""
traversals, sub_optimizers = zip(*traversals_and_optimizers)
hyper_params = [opt.hyper_params for opt in sub_optimizers]
super().__init__(hyper_params)
self.traversals = traversals
self.sub_optimizers = sub_optimizers
def init_state(self, params):
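    # First pass: tag every leaf with the indices of the traversals that match
    # it, so configurations where two sub-optimizers claim the same parameter
    # can be rejected below.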
param_states = jax.tree_map(lambda x: _Marker(), params)
overlap = False
for idx, traversal in enumerate(self.traversals):
for match in traversal.iterate(param_states):
match._indices.append(idx)
overlap |= len(match._indices) > 1
if overlap:
raise ValueError(
'Multiple optimizers match the same leaves : ' +
str(jax.tree_map(lambda match: match._indices, param_states)))
param_states = jax.tree_map(lambda x: _Marker(), params)
for focus, opt_def in zip(self.traversals, self.sub_optimizers):
ps = subtree_from_traversal(focus, params)
ss = opt_def.init_state(ps)
param_states = update_subtree_of_traversal(
focus, param_states, ss.param_states)
# Update state to None when param is not optimized by any sub optimizer.
param_states = jax.tree_map(
lambda x: None if isinstance(x, _Marker) else x, param_states)
return OptimizerState(jnp.asarray(0, dtype=jnp.int32), param_states)
def apply_gradient(self, hyper_params, params, state, grads):
new_params = params
it = zip(self.traversals, self.sub_optimizers, hyper_params)
new_param_states = jax.tree_map(lambda x: _Marker(), params)
for focus, opt_def, hp in it:
ps = subtree_from_traversal(focus, params)
gs = subtree_from_traversal(focus, grads)
ss = subtree_from_traversal(focus, state.param_states)
prev_ss = OptimizerState(state.step, ss)
new_ps, new_ss = opt_def.apply_gradient(hp, ps, prev_ss, gs)
new_params = update_subtree_of_traversal(focus, new_params, new_ps)
new_param_states = update_subtree_of_traversal(
focus, new_param_states, new_ss.param_states)
# Update state to None when param is not optimized by any sub optimizer.
new_param_states = jax.tree_map(
lambda x: None if isinstance(x, _Marker) else x, new_param_states)
return new_params, OptimizerState(state.step + 1, new_param_states)
def update_hyper_params(self, **hyper_param_overrides):
"""Updates the hyper parameters with a set of overrides.
This method is called from :meth:`Optimizer.apply_gradient` to create the
hyper parameters for a specific optimization step.
MultiOptimizer will apply the overrides for each sub optimizer.
Args:
**hyper_param_overrides: the hyper parameters updates
will override the defaults specified in the `OptimizerDef`.
Pass `hyper_params=...` to replace all hyper parameters.
Returns:
The new hyper parameters.
"""
hps = hyper_param_overrides.pop('hyper_params', self.hyper_params)
if hyper_param_overrides:
hps = [hp.replace(**hyper_param_overrides) for hp in hps]
return hps
def set_param_axes(self, param_logical_axes):
"""Derives factorization rules from model parameter logical axes."""
for focus, opt_def in zip(self.traversals, self.sub_optimizers):
pla_subtree = subtree_from_traversal(focus, param_logical_axes)
if hasattr(opt_def, 'set_param_axes'):
opt_def.set_param_axes(pla_subtree)
def derive_logical_axes(self, optimizer, param_logical_axes):
"""Derives optimizer logical partitioning from model logical partitions."""
param_states = jax.tree_map(
lambda x: _Marker(), optimizer.state.param_states)
for focus, opt_def in zip(self.traversals, self.sub_optimizers):
if hasattr(opt_def, 'derive_logical_axes'):
ps = subtree_from_traversal(focus, param_logical_axes)
ss = subtree_from_traversal(focus, optimizer.state.param_states)
new_opt = opt_def.derive_logical_axes(
Optimizer(opt_def, OptimizerState(None, ss), ps), ps)
param_states = update_subtree_of_traversal(
focus, param_states, new_opt.state.param_states)
# Update axes to None when param is not optimized by any sub optimizer.
param_states = jax.tree_map(
lambda x: None if isinstance(x, _Marker) else x, param_states)
return Optimizer(optimizer.optimizer_def,
OptimizerState(None, param_states),
param_logical_axes)
| {
"content_hash": "93d90fc1da2bc4c07b03a5facbb0082f",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 80,
"avg_line_length": 40.43555555555555,
"alnum_prop": 0.6958672235656188,
"repo_name": "google-research/prompt-tuning",
"id": "8313d807b4ce84cf114be604e9e64ff18c86aba7",
"size": "9670",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "prompt_tuning/train/optim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "496372"
},
{
"name": "Shell",
"bytes": "4553"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from rfxcom.protocol.lighting2 import Lighting2
from rfxcom.exceptions import (InvalidPacketLength, UnknownPacketSubtype,
UnknownPacketType)
class Lighting2TestCase(TestCase):
def setUp(self):
self.data = bytearray(b'\x0B\x11\x00\x01\x01\x11\xF3\x42'
b'\x0A\x01\x0F\x40')
self.parser = Lighting2()
def test_parse_frame_chacon_54781_on(self):
self.data = bytearray(b'\x0B\x11\x00\x01\x01\x11\xF3\x42'
b'\x0A\x01\x0F\x40')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'id': "0x0111F342",
'packet_length': 11,
'packet_type': 17,
'packet_type_name': "Lighting2 sensors",
'packet_subtype': 0,
'packet_subtype_name': "AC",
'sequence_number': 1,
'unit_code': 10,
'command': 1,
'command_text': "On",
'dim_level': 100,
'signal_level': 4
})
self.assertEquals(str(self.parser), "<Lighting2 ID:0x0111F342>")
def test_parse_frame_chacon_54781_off(self):
self.data = bytearray(b'\x0B\x11\x00\x03\x01\x11\xF3\x42'
b'\x0A\x00\x00\x50')
self.assertTrue(self.parser.validate_packet(self.data))
self.assertTrue(self.parser.can_handle(self.data))
result = self.parser.load(self.data)
self.assertEquals(result, {
'id': "0x0111F342",
'packet_length': 11,
'packet_type': 17,
'packet_type_name': "Lighting2 sensors",
'packet_subtype': 0,
'packet_subtype_name': "AC",
'sequence_number': 3,
'unit_code': 10,
'command': 0,
'command_text': "Off",
'dim_level': 0,
'signal_level': 5,
})
self.assertEquals(str(self.parser), "<Lighting2 ID:0x0111F342>")
def test_validate_bytes_short(self):
data = self.data[:1]
with self.assertRaises(InvalidPacketLength):
self.parser.validate_packet(data)
def test_validate_unkown_packet_type(self):
self.data[1] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketType):
self.parser.validate_packet(self.data)
def test_validate_unknown_sub_type(self):
self.data[2] = 0xFF
self.assertFalse(self.parser.can_handle(self.data))
with self.assertRaises(UnknownPacketSubtype):
self.parser.validate_packet(self.data)
def test_log_namer(self):
self.assertEquals(self.parser.log.name, 'rfxcom.protocol.Lighting2')
| {
"content_hash": "a0358709108796197f5f07d2937a5d40",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 76,
"avg_line_length": 30.3125,
"alnum_prop": 0.5707903780068728,
"repo_name": "d0ugal-archive/python-rfxcom",
"id": "7b428624f7e86de12287734c8c76a9e0d8dbe415",
"size": "2910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/protocol/test_lighting2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "96602"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
} |
import operator
from raksha.openstack.common import jsonutils
from raksha.openstack.common.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms"""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def _parse_string(self, string, host_state):
"""Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
if obj is None:
return None
for item in path[1:]:
obj = obj.get(item, None)
if obj is None:
return None
return obj
def _process_filter(self, query, host_state):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:
query = filter_properties['scheduler_hints']['query']
except KeyError:
query = None
if not query:
return True
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
            # The query matched, so this host passes the filter.
return True
return False
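# --- Illustrative usage sketch (editor addition, not part of the original
# module). It shows how a JSON query reaches JsonFilter through the
# scheduler hints. The host attributes 'free_ram_mb' and 'free_disk_mb' and
# the _FakeHostState class are assumptions made only for this example; real
# callers pass the scheduler's HostState object.
def _example_json_filter_usage():
    class _FakeHostState(object):
        free_ram_mb = 2048
        free_disk_mb = 500 * 1024
    query = jsonutils.dumps(
        ['and',
         ['>=', '$free_ram_mb', 1024],
         ['>=', '$free_disk_mb', 200 * 1024]])
    filter_properties = {'scheduler_hints': {'query': query}}
    # Both capacity checks pass for the fake host, so this returns True.
    return JsonFilter().host_passes(_FakeHostState(), filter_properties)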
| {
"content_hash": "b8930a2ec57c21ccbab47a9ef96998fc",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 73,
"avg_line_length": 31.828358208955223,
"alnum_prop": 0.5582649472450176,
"repo_name": "DPaaS-Raksha/raksha",
"id": "e80d2d337db1acf71861ab0ce6d6de98686a4c70",
"size": "4907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raksha/openstack/common/scheduler/filters/json_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1101651"
}
],
"symlink_target": ""
} |
import os
import re
from distcovery.exceptions import InvalidTestRoot
_TEST_UNIT_PREFIX = 'test_'
_PYTHON_TOKEN_REGEX = '([a-zA-Z0-9_]+)'
_TEST_PACKAGE_REGEX = '(%s%s)' % (_TEST_UNIT_PREFIX, _PYTHON_TOKEN_REGEX)
_TEST_MODULE_REGEX = '%s\\.py' % _TEST_PACKAGE_REGEX
_TEST_NAME = 1
_TEST_ALIAS = 2
_TEST_PACKAGE_PATTERN = re.compile('%s$' % _TEST_PACKAGE_REGEX)
_TEST_MODULE_PATTERN = re.compile('%s$' % _TEST_MODULE_REGEX)
def _is_package(path):
return os.path.isdir(path) and \
os.path.isfile(os.path.join(path, '__init__.py'))
def _is_module(path):
return os.path.isfile(path)
class Importable(object):
def __init__(self, base, path, match=None, parent=None):
self.base = base
self.path = path
if match and parent:
self.alias = parent.alias + (match.group(_TEST_ALIAS),)
self.name = parent.name + (match.group(_TEST_NAME),)
else:
self.alias = tuple()
self.name = tuple()
@staticmethod
def join_sequence(sequence):
return '.'.join(sequence)
def str_alias(self):
return self.join_sequence(self.alias)
def str_name(self):
return self.join_sequence(self.base + self.name)
class Package(Importable):
def __init__(self, base, path, match=None, parent=None):
super(Package, self).__init__(base, path, match, parent)
self.modules = []
self.packages = []
self.content = {}
def listdir(self):
for name in os.listdir(self.path):
yield os.path.join(self.path, name), name
def walk(self):
for path, name in self.listdir():
child = None
match = _TEST_PACKAGE_PATTERN.match(name)
if match:
if _is_package(path):
child = Package(self.base, path, match, self)
for child_alias, child_iterable in child.walk():
self.content[child_alias] = child_iterable
yield child_alias, child_iterable
self.packages.append(child)
else:
match = _TEST_MODULE_PATTERN.match(name)
if match and _is_module(path):
child = Importable(self.base, path, match, self)
self.modules.append(child)
if child:
child_alias = child.str_alias()
self.content[child_alias] = child
yield child_alias, child
def enumerate(self, level=1):
for module in self.modules:
yield False, level, module.str_alias()
for package in self.packages:
yield True, level, package.str_alias()
for item in package.enumerate(level + 1):
yield item
def _split_path(path, root):
head = path
tail = tuple()
while head != root:
head, name = os.path.split(head)
if not name:
raise InvalidTestRoot(path, root)
tail = (name,) + tail
return tail
def walk(path):
package = Package(_split_path(os.path.abspath(path), os.getcwd()), path)
    # Consuming the generator populates the package tree (the content mapping
    # on each package) as a side effect; the resulting dict is not used here.
    content = dict(package.walk())
return package
| {
"content_hash": "914b27bef8d3dfd7e06495c2f3787848",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 29.425925925925927,
"alnum_prop": 0.565449968533669,
"repo_name": "vasili-v/distcovery",
"id": "08511bd63b42e5d2982bf06c2ce73f649cfd9a96",
"size": "3178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distcovery/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50194"
}
],
"symlink_target": ""
} |
"""
Define Touchstone file format reader/writer.
[[[cog
import os, sys
sys.path.append(os.environ['TRACER_DIR'])
import trace_ex_eng_touchstone
exobj = trace_ex_eng_touchstone.trace_module(no_print=True)
]]]
[[[end]]]
"""
# touchstone.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,C0325,C0413,E1101,E1111,R0914,W0611
# Standard library imports
import copy
import math
import os
import re
import warnings
# PyPI imports
if os.environ.get("READTHEDOCS", "") != "True":
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
import pexdoc.exh
import pexdoc.pcontracts
from pexdoc.ptypes import file_name, file_name_exists
# Intra-package imports imports
from .functions import to_scientific_string
from .ptypes import touchstone_data, touchstone_noise_data, touchstone_options
###
# Functions
###
def _chunk_noise(noise):
"""Chunk input noise data into valid Touchstone file rows."""
data = zip(
noise["freq"],
noise["nf"],
np.abs(noise["rc"]),
np.angle(noise["rc"]),
noise["res"],
)
for freq, nf, rcmag, rcangle, res in data:
yield freq, nf, rcmag, rcangle, res
def _chunk_pars(freq_vector, data_matrix, pformat):
"""Chunk input data into valid Touchstone file rows."""
pformat = pformat.upper()
length = 4
for freq, data in zip(freq_vector, data_matrix):
data = data.flatten()
for index in range(0, data.size, length):
fpoint = [freq] if not index else [None]
cdata = data[index : index + length]
if pformat == "MA":
vector1 = np.abs(cdata)
vector2 = np.rad2deg(np.angle(cdata))
elif pformat == "RI":
vector1 = np.real(cdata)
vector2 = np.imag(cdata)
else: # elif pformat == 'DB':
vector1 = 20.0 * np.log10(np.abs(cdata))
vector2 = np.rad2deg(np.angle(cdata))
sep_data = np.array([])
for item1, item2 in zip(vector1, vector2):
sep_data = np.concatenate((sep_data, np.array([item1, item2])))
ret = np.concatenate((np.array(fpoint), sep_data))
yield ret
@pexdoc.pcontracts.contract(fname="file_name_exists")
def read_touchstone(fname):
r"""
Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file.
According to the specification a data line can have at most values for four
complex parameters (plus potentially the frequency point), however this
function is able to process malformed files as long as they have the
correct number of data points (:code:`points` x :code:`nports` x
:code:`nports` where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file). Per
the Touchstone specification noise data is only supported for two-port
files
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:rtype: dictionary with the following structure:
* **nports** (*integer*) -- number of ports
* **opts** (:ref:`TouchstoneOptions`) -- File options
* **data** (:ref:`TouchstoneData`) -- Parameter data
* **noise** (:ref:`TouchstoneNoiseData`) -- Noise data, per the Touchstone
specification only supported in 2-port files
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.read_touchstone
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (File *[fname]* has no data)
* RuntimeError (First non-comment line is not the option line)
* RuntimeError (Frequency must increase)
* RuntimeError (Illegal data in line *[lineno]*)
* RuntimeError (Illegal option line)
* RuntimeError (Malformed data)
* RuntimeError (Malformed noise data)
* RuntimeError (Noise frequency must increase)
.. [[[end]]]
.. note:: The returned parameter(s) are complex numbers in real and
imaginary format regardless of the format used in the Touchstone file.
Similarly, the returned frequency vector unit is Hertz regardless of
the unit used in the Touchstone file
"""
# pylint: disable=R0912,R0915,W0702
# Exceptions definitions
exnports = pexdoc.exh.addex(
RuntimeError, "File *[fname]* does not have a valid extension"
)
exnoopt = pexdoc.exh.addex(
RuntimeError, "First non-comment line is not the option line"
)
exopt = pexdoc.exh.addex(RuntimeError, "Illegal option line")
exline = pexdoc.exh.addex(RuntimeError, "Illegal data in line *[lineno]*")
exnodata = pexdoc.exh.addex(RuntimeError, "File *[fname]* has no data")
exdata = pexdoc.exh.addex(RuntimeError, "Malformed data")
exndata = pexdoc.exh.addex(RuntimeError, "Malformed noise data")
exfreq = pexdoc.exh.addex(RuntimeError, "Frequency must increase")
exnfreq = pexdoc.exh.addex(RuntimeError, "Noise frequency must increase")
# Verify that file has correct extension format
_, ext = os.path.splitext(fname)
ext = ext.lower()
nports_regexp = re.compile(r"\.s(\d+)p")
match = nports_regexp.match(ext)
exnports(not match, edata={"field": "fname", "value": fname})
nports = int(match.groups()[0])
opt_line = False
units_dict = {"GHZ": "GHz", "MHZ": "MHz", "KHZ": "KHz", "HZ": "Hz"}
scale_dict = {"GHZ": 1e9, "MHZ": 1e6, "KHZ": 1e3, "HZ": 1.0}
units_opts = ["GHZ", "MHZ", "KHZ", "HZ"]
type_opts = ["S", "Y", "Z", "H", "G"]
format_opts = ["DB", "MA", "RI"]
opts = dict(units=None, ptype=None, pformat=None, z0=None)
data = []
with open(fname, "r") as fobj:
for num, line in enumerate(fobj):
line = line.strip().upper()
# Comment line
if line.startswith("!"):
continue
# Options line
if (not opt_line) and (not line.startswith("#")):
exnoopt(True)
if not opt_line:
# Each Touchstone data file must contain an option line
# (additional option lines after the first one will be ignored)
opt_line = True
tokens = line[1:].split() # Remove initial hash
if "R" in tokens:
idx = tokens.index("R")
add = 1
if len(tokens) > idx + 1:
try:
opts["z0"] = float(tokens[idx + 1])
add = 2
except:
pass
tokens = tokens[:idx] + tokens[idx + add :]
matches = 0
for token in tokens:
if (token in format_opts) and (not opts["pformat"]):
matches += 1
opts["pformat"] = token
elif (token in units_opts) and (not opts["units"]):
matches += 1
opts["units"] = units_dict[token]
elif (token in type_opts) and (not opts["ptype"]):
matches += 1
opts["ptype"] = token
exopt(matches != len(tokens))
if opt_line and line.startswith("#"):
continue
# Data lines
try:
if "!" in line:
idx = line.index("!")
line = line[:idx]
tokens = [float(item) for item in line.split()]
data.append(tokens)
except:
exline(True, edata={"field": "lineno", "value": num + 1})
data = np.concatenate(data)
exnodata(not data.size, edata={"field": "fname", "value": fname})
# Set option defaults
opts["units"] = opts["units"] or "GHz"
opts["ptype"] = opts["ptype"] or "S"
opts["pformat"] = opts["pformat"] or "MA"
opts["z0"] = opts["z0"] or 50
# Format data
data_dict = {}
nums_per_freq = 1 + (2 * (nports ** 2))
fslice = slice(0, data.size, nums_per_freq)
freq = data[fslice]
ndiff = np.diff(freq)
ndict = {}
if (nports == 2) and ndiff.size and (min(ndiff) <= 0):
# Extract noise data
npoints = np.where(ndiff <= 0)[0][0] + 1
freq = freq[:npoints]
ndata = data[9 * npoints :]
nfpoints = int(ndata.size / 5.0)
exndata(ndata.size % 5 != 0)
data = data[: 9 * npoints]
ndiff = 1
nfslice = slice(0, ndata.size, 5)
nfreq = ndata[nfslice]
ndiff = np.diff(nfreq)
exnfreq(bool(ndiff.size and (min(ndiff) <= 0)))
nfig_slice = slice(1, ndata.size, 5)
rlmag_slice = slice(2, ndata.size, 5)
rlphase_slice = slice(3, ndata.size, 5)
res_slice = slice(4, ndata.size, 5)
ndict["freq"] = scale_dict[opts["units"].upper()] * nfreq
ndict["nf"] = ndata[nfig_slice]
ndict["rc"] = ndata[rlmag_slice] * np.exp(1j * ndata[rlphase_slice])
ndict["res"] = ndata[res_slice]
ndict["points"] = nfpoints
exdata(data.size % nums_per_freq != 0)
npoints = int(data.size / nums_per_freq)
exfreq(bool(ndiff.size and (min(ndiff) <= 0)))
data_dict["freq"] = scale_dict[opts["units"].upper()] * freq
d1slice = slice(0, data.size, 2)
d2slice = slice(1, data.size, 2)
data = np.delete(data, fslice)
# For format that has angle information, the angle is given in degrees
if opts["pformat"] == "MA":
data = data[d1slice] * np.exp(1j * np.deg2rad(data[d2slice]))
elif opts["pformat"] == "RI":
data = data[d1slice] + (1j * data[d2slice])
else: # if opts['pformat'] == 'DB':
data = (10 ** (data[d1slice] / 20.0)) * np.exp(1j * np.deg2rad(data[d2slice]))
if nports > 1:
data_dict["pars"] = np.resize(data, (npoints, nports, nports))
else:
data_dict["pars"] = copy.copy(data)
del data
data_dict["points"] = npoints
if nports == 2:
# The order of data for a two-port file is N11, N21, N12, N22 but for
# m ports where m > 2, the order is N11, N12, N13, ..., N1m
data_dict["pars"] = np.transpose(data_dict["pars"], (0, 2, 1))
return dict(nports=nports, opts=opts, data=data_dict, noise=ndict)
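# --- Illustrative usage sketch (editor addition, not part of the original
# module). The file name 'example.s2p' is an assumption; any existing
# two-port Touchstone file would work the same way.
def _example_read_touchstone(fname="example.s2p"):
    parsed = read_touchstone(fname)
    freq = parsed["data"]["freq"]    # frequency vector, always in Hertz
    s_pars = parsed["data"]["pars"]  # points x nports x nports complex array
    print("Ports: %d" % parsed["nports"])
    print("Frequency points: %d" % parsed["data"]["points"])
    print("S11 at the first point: %s" % s_pars[0][0][0])
    return freq, s_pars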
@pexdoc.pcontracts.contract(
fname="file_name",
options="touchstone_options",
data="touchstone_data",
noise="None|touchstone_noise_data",
frac_length="int,>=0",
exp_length="int,>0",
)
def write_touchstone(fname, options, data, noise=None, frac_length=10, exp_length=2):
r"""
Write a `Touchstone`_ file.
Parameter data is first resized to an :code:`points` x :code:`nports` x
:code:`nports` where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file; then
parameter data is written to file in scientific notation
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:param options: Touchstone file options
:type options: :ref:`TouchstoneOptions`
:param data: Touchstone file parameter data
:type data: :ref:`TouchstoneData`
:param noise: Touchstone file parameter noise data (only supported in
two-port files)
:type noise: :ref:`TouchstoneNoiseData`
:param frac_length: Number of digits to use in fractional part of data
:type frac_length: non-negative integer
:param exp_length: Number of digits to use in exponent
:type exp_length: positive integer
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.write_touchstone
:raises:
* RuntimeError (Argument \`data\` is not valid)
* RuntimeError (Argument \`exp_length\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frac_length\` is not valid)
* RuntimeError (Argument \`noise\` is not valid)
* RuntimeError (Argument \`options\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (Malformed data)
* RuntimeError (Noise data only supported in two-port files)
.. [[[end]]]
"""
# pylint: disable=R0913
# Exceptions definitions
exnports = pexdoc.exh.addex(
RuntimeError, "File *[fname]* does not have a valid extension"
)
exnoise = pexdoc.exh.addex(
RuntimeError, "Noise data only supported in two-port files"
)
expoints = pexdoc.exh.addex(RuntimeError, "Malformed data")
# Data validation
_, ext = os.path.splitext(fname)
ext = ext.lower()
nports_regexp = re.compile(r"\.s(\d+)p")
match = nports_regexp.match(ext)
exnports(not match, edata={"field": "fname", "value": fname})
nports = int(match.groups()[0])
exnoise(bool((nports != 2) and noise))
nums_per_freq = nports ** 2
expoints(data["points"] * nums_per_freq != data["pars"].size)
#
npoints = data["points"]
par_data = np.resize(np.copy(data["pars"]), (npoints, nports, nports))
if nports == 2:
par_data = np.transpose(par_data, (0, 2, 1))
units_dict = {"ghz": "GHz", "mhz": "MHz", "khz": "KHz", "hz": "Hz"}
options["units"] = units_dict[options["units"].lower()]
fspace = 2 + frac_length + (exp_length + 2)
# Format data
with open(fname, "w") as fobj:
fobj.write(
"# {units} {ptype} {pformat} R {z0}\n".format(
units=options["units"],
ptype=options["ptype"],
pformat=options["pformat"],
z0=options["z0"],
)
)
for row in _chunk_pars(data["freq"], par_data, options["pformat"]):
row_data = [
to_scientific_string(item, frac_length, exp_length, bool(num != 0))
if item is not None
else fspace * " "
for num, item in enumerate(row)
]
fobj.write(" ".join(row_data) + "\n")
if (nports == 2) and noise:
fobj.write("! Noise data\n")
for row in _chunk_noise(noise):
row_data = [
to_scientific_string(item, frac_length, exp_length, bool(num != 0))
for num, item in enumerate(row)
]
fobj.write(" ".join(row_data) + "\n")
| {
"content_hash": "a2b690de70006f20b70088800f4f5111",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 87,
"avg_line_length": 37.54040404040404,
"alnum_prop": 0.5830082066460379,
"repo_name": "pmacosta/peng",
"id": "8e28dda844402de766b067589db62d1d2277aafb",
"size": "14866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peng/touchstone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3515"
},
{
"name": "Python",
"bytes": "409268"
},
{
"name": "Shell",
"bytes": "14159"
}
],
"symlink_target": ""
} |
""" Example program used by execute_code functions """
# Example comment for unit testing
# pylint: disable=too-few-public-methods
class Hello(object):
""" Simple class to show imports """
def __init__(self):
self.msg = 'Hello, ' + 'world!'
def out(self):
""" returns Hello, world!"""
return self.msg
if __name__ == "__main__":
print Hello().out()
| {
"content_hash": "35fe3a0e3becf958a23f1e25c6945ebc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 26.066666666666666,
"alnum_prop": 0.5907928388746803,
"repo_name": "jpsenior/sphinx-execute-code",
"id": "fafa837482485c7ecdb24261a98229ef6fd941a5",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/example_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9825"
}
],
"symlink_target": ""
} |
"""Class for hyperparameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs."""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, **kwargs):
"""Create an instance of `HParams` from keyword arguments.
    The keyword arguments specify name-value pairs for the hyperparameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `HParams` object, so they
can be accessed directly with the dot notation `hparams._name_`.
Example:
```python
# Define 3 hyperparameters: 'learning_rate' is a float parameter,
# 'num_hidden_units' an integer parameter, and 'activation' a string
# parameter.
hparams = HParams(learning_rate=0.1, num_hidden_units=100,
activation='relu')
hparams.activation ==> 'relu'
```
Note that a few names are reserved and cannot be used as hyperparameter
names. If you use one of the reserved name the constructor raises a
`ValueError`.
Args:
**kwargs: Key-value pairs where the key is the hyperparameter name and the
value is the value for the parameter.
Raises:
ValueError: If one of the arguments is invalid.
"""
# Register the hyperparameters and their type in _hparam_types.
# _hparam_types maps the parameter name to a tuple (type, bool).
# The type value is the type of the parameter for scalar hyperparameters,
# or the type of the list elements for multidimensional hyperparameters.
# The bool value is True if the value is a list, False otherwise.
self._hparam_types = {}
for name, value in six.iteritems(kwargs):
self.add_hparam(name, value)
def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object.
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError('Multi-valued hyperparameters cannot be empty: %s' %
name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()}
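# --- Illustrative usage sketch (editor addition, not part of the original
# module). The hyperparameter names below are arbitrary examples.
def _example_hparams_usage():
    hparams = HParams(learning_rate=0.1, num_hidden_units=100,
                      activation="relu")
    hparams.add_hparam("dropout_rates", [0.1, 0.2])
    # Attribute access and the values() dictionary stay in sync.
    assert hparams.learning_rate == 0.1
    assert hparams.values()["dropout_rates"] == [0.1, 0.2]
    return hparams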
| {
"content_hash": "e4bc5c43746f8b59062a853a353ecd26",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 34.61797752808989,
"alnum_prop": 0.673482635507952,
"repo_name": "google-research/valan",
"id": "ff7412365abc13791dca470c65777a8c5909d7ee",
"size": "3669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/hparam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "991"
},
{
"name": "Python",
"bytes": "639552"
},
{
"name": "Shell",
"bytes": "17082"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from ommongo.py3compat import *
from functools import wraps
def known_failure(fun):
    @wraps(fun)
    def wrapper(*args, **kwds):
        try:
            fun(*args, **kwds)
        except Exception:
            # Expected path: the wrapped test is a known failure.
            return
        # Raised outside the try block so it is not swallowed by the except
        # clause; an unexpectedly passing "known failure" should be reported.
        raise Exception('Known failure passed! %s' % fun.__name__)
    return wrapper
| {
"content_hash": "b25fdab1eafe8d42e3f2544c78c38913",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 24.214285714285715,
"alnum_prop": 0.5899705014749262,
"repo_name": "bapakode/OmMongo",
"id": "32c4307371e27874f7470933148aea7ce5be479c",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "270963"
}
],
"symlink_target": ""
} |
from app import db
from app.models import User
from tests.general import AppTestCase
class TestModels(AppTestCase):
def test_user_initialization(self):
user = User(name='foo', social_id='bar')
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
| {
"content_hash": "0be7519c8b1cd76324620a7264e8234b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 27,
"alnum_prop": 0.6851851851851852,
"repo_name": "Encrylize/MyDictionary",
"id": "412611f6031f9c8571c059d7744c8c5029150a13",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "262"
},
{
"name": "HTML",
"bytes": "7854"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "27531"
}
],
"symlink_target": ""
} |
import os
from dataclasses import dataclass, field
from functional import seq
from typing import Any, Iterable, Set
from anki.exporting import AnkiExporter
from .file_provider import FileProvider
@dataclass
class NoteModelFileProvider(FileProvider):
anki_collection: Any
model_ids: Iterable[int]
models: Iterable = field(init=False)
anki_exporter: AnkiExporter = field(init=False)
def __post_init__(self):
self.anki_exporter = AnkiExporter(self.anki_collection)
self.models = seq(self.model_ids) \
.map(self.anki_collection.models.get) \
.filter(lambda m: m is not None).to_list()
def get_files(self) -> Set[str]:
return seq(os.listdir(self.anki_collection.media.dir())) \
.filter(lambda fn: fn.startswith("_")) \
.filter(self.belongs_to_any_model) \
.to_set()
def belongs_to_any_model(self, file_name: str) -> bool:
return seq(self.models) \
.map(lambda model: self.anki_exporter._modelHasMedia(model, file_name)) \
.any()
| {
"content_hash": "092b02b673433039973a2370d640f2a7",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 85,
"avg_line_length": 32.57575757575758,
"alnum_prop": 0.6502325581395348,
"repo_name": "Stvad/CrowdAnki",
"id": "c2e1b0c6ea4b3ff8fcb101f64eb146e5431e85b2",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowd_anki/anki/adapters/note_model_file_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "152637"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
} |
import bcrypt
from flask_wtf import Form
from wtforms import fields, validators
from app import models
class LoginForm(Form):
""" Login form with built in validation of password. """
username = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_password(self, field):
self.user = models.User.query.filter_by(username=self.username.data).first()
if self.user == None:
raise validators.ValidationError('Invalid username or password')
if bcrypt.hashpw(self.password.data.encode('utf-8'),
self.user.password_hash) != \
self.user.password_hash:
raise validators.ValidationError('Invalid username or password')
class AddTokenForm(Form):
""" Form to add a U2F token. """
name = fields.TextField(validators=[validators.required()])
response = fields.HiddenField(validators=[validators.required()])
class SignTokenForm(Form):
""" Sign in using a token """
response = fields.HiddenField(validators=[validators.required()])
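# --- Illustrative sketch (editor addition, not part of the original module).
# It shows how a password hash compatible with LoginForm.validate_password
# could be produced at registration time; persisting it on the User model is
# an assumption about the surrounding application.
def _example_hash_password(plain_text_password):
    # bcrypt embeds the salt in its output, so hashpw(candidate, stored_hash)
    # reproduces stored_hash only when the candidate password matches.
    return bcrypt.hashpw(plain_text_password.encode('utf-8'), bcrypt.gensalt())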
| {
"content_hash": "f1eb339edf6c6d446e60f0e58fdfb8e5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 31.102564102564102,
"alnum_prop": 0.6892003297609234,
"repo_name": "streety/Home",
"id": "5c2f5bc318fbdf794601c986cd9a1d19ac4f0e89",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88018"
},
{
"name": "Dockerfile",
"bytes": "3938"
},
{
"name": "HTML",
"bytes": "40473"
},
{
"name": "JavaScript",
"bytes": "20880"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "28042"
}
],
"symlink_target": ""
} |
from pprint import pprint
import kaboom.api
import kaboom.vm
def status(api):
print "Coinbase: %s" % api.coinbase()
print "Listening? %s" % api.is_listening()
print "Mining? %s" % api.is_mining()
print "Peer count: %d" % api.peer_count()
last_block = api.last_block()
print "Last Block:"
pprint(last_block)
keys = api.keys()
print "Keys:"
for key in keys:
address = api.secret_to_address(key)
balance = api.balance_at(address)
print "- %s %.4e" % (address, balance)
if __name__ == '__main__':
kaboom.vm.ensure_running()
api = kaboom.api.Api()
api.wait_for_startup(verbose=True)
status(api)
| {
"content_hash": "c01f30ff514338d0bf27fef18e5fcc3b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 46,
"avg_line_length": 23.344827586206897,
"alnum_prop": 0.6026587887740029,
"repo_name": "Cpt-Obvious/kaboom",
"id": "11577038cfa00bb9348415bf034b08014919cfad",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "status.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11394"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
import time
from kafka import SimpleProducer, KafkaClient
from kafka.common import LeaderNotAvailableError
import json
def print_response(response=None):
if response:
print('Error: {0}'.format(response[0].error))
print('Offset: {0}'.format(response[0].offset))
def main():
kafka = KafkaClient("192.168.33.10:9092")
producer = SimpleProducer(kafka)
topic = b'test_test'
# msg = "{'resource':'student','action':'added','id':'db2987','object':{'last':'burge','first':'dustin','courses':['COMS W4115','COMS W4170','COMS E6111'],'uni':'db2987'}}"
#msg = "{resource:student,action:added,id:db2987,object:{last:burge,first:dustin,courses:[COMS W4115,COMS W4170,COMS E6111],uni:db2987}}"
msgAdded = {"resource":"student","action":"added","id":"db2987","object":{"last":"burge","first":"dustin","courses":["COMSW4115","COMSW4170","COMSE6111"],"uni":"db2987"}}
msgRemoved = {"resource":"student","action":"removed","id":"db2987"}
#msgModified =
msg = msgRemoved
try:
print_response(producer.send_messages(topic, json.dumps(msg)))
except LeaderNotAvailableError:
# https://github.com/mumrah/kafka-python/issues/249
time.sleep(1)
            print_response(producer.send_messages(topic, json.dumps(msg)))
kafka.close()
if __name__ == "__main__":
    main()
| {
"content_hash": "9de2f055c7cc86619ef40c70055f1a28",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 176,
"avg_line_length": 37.138888888888886,
"alnum_prop": 0.6537023186237846,
"repo_name": "jhn/generic",
"id": "2facb5e9d8035380404ce2b2a71fa82dd605992a",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrity/produce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "4779"
},
{
"name": "Java",
"bytes": "1512"
},
{
"name": "JavaScript",
"bytes": "71315"
},
{
"name": "Python",
"bytes": "12780"
},
{
"name": "Shell",
"bytes": "9050"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class ApplicationsOperations(object):
"""ApplicationsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "1.6".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "1.6"
self.config = config
def create(
self, parameters, custom_headers=None, raw=False, **operation_config):
"""Create a new application.
:param parameters: The parameters for creating an application.
:type parameters: ~azure.graphrbac.models.ApplicationCreateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Application or ClientRawResponse if raw=true
:rtype: ~azure.graphrbac.models.Application or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
# Construct URL
url = '/{tenantID}/applications'
path_format_arguments = {
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ApplicationCreateParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
raise models.GraphErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Application', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Lists applications by filter parameters.
:param filter: The filters to apply to the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Application
:rtype:
~azure.graphrbac.models.ApplicationPaged[~azure.graphrbac.models.Application]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/{tenantID}/applications'
path_format_arguments = {
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = '/{tenantID}/{nextLink}'
path_format_arguments = {
'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.ApplicationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ApplicationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def delete(
self, application_object_id, custom_headers=None, raw=False, **operation_config):
"""Delete an application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
raise models.GraphErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, application_object_id, custom_headers=None, raw=False, **operation_config):
"""Get an application by object ID.
:param application_object_id: Application object ID.
:type application_object_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Application or ClientRawResponse if raw=true
:rtype: ~azure.graphrbac.models.Application or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Application', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def patch(
self, application_object_id, parameters, custom_headers=None, raw=False, **operation_config):
"""Update an existing application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param parameters: Parameters to update an existing application.
:type parameters: ~azure.graphrbac.models.ApplicationUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ApplicationUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.GraphErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_key_credentials(
self, application_object_id, custom_headers=None, raw=False, **operation_config):
"""Get the keyCredentials associated with an application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of KeyCredential
:rtype:
~azure.graphrbac.models.KeyCredentialPaged[~azure.graphrbac.models.KeyCredential]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}/keyCredentials'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.KeyCredentialPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.KeyCredentialPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def update_key_credentials(
self, application_object_id, value, custom_headers=None, raw=False, **operation_config):
"""Update the keyCredentials associated with an application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param value: A collection of KeyCredentials.
:type value: list[~azure.graphrbac.models.KeyCredential]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
parameters = models.KeyCredentialsUpdateParameters(value=value)
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}/keyCredentials'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'KeyCredentialsUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.GraphErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_password_credentials(
self, application_object_id, custom_headers=None, raw=False, **operation_config):
"""Get the passwordCredentials associated with an application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PasswordCredential
:rtype:
~azure.graphrbac.models.PasswordCredentialPaged[~azure.graphrbac.models.PasswordCredential]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}/passwordCredentials'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.PasswordCredentialPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PasswordCredentialPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def update_password_credentials(
self, application_object_id, value, custom_headers=None, raw=False, **operation_config):
"""Update passwordCredentials associated with an application.
:param application_object_id: Application object ID.
:type application_object_id: str
:param value: A collection of PasswordCredentials.
:type value: list[~azure.graphrbac.models.PasswordCredential]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
parameters = models.PasswordCredentialsUpdateParameters(value=value)
# Construct URL
url = '/{tenantID}/applications/{applicationObjectId}/passwordCredentials'
path_format_arguments = {
'applicationObjectId': self._serialize.url("application_object_id", application_object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PasswordCredentialsUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
raise models.GraphErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
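# --- Illustrative usage sketch (editor addition, not part of the original
# module). This operations class is normally reached through a configured
# GraphRbacManagementClient rather than instantiated directly; the
# graph_client argument, the ApplicationCreateParameters field names and the
# OData filter string below are assumptions made only for this example.
def _example_applications_usage(graph_client):
    params = models.ApplicationCreateParameters(
        display_name="sketch-app",
        identifier_uris=["http://sketch-app.example.com"],
        available_to_other_tenants=False)
    app = graph_client.applications.create(params)
    for existing in graph_client.applications.list(
            filter="displayName eq 'sketch-app'"):
        print(existing.object_id)
    graph_client.applications.delete(app.object_id)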
| {
"content_hash": "797bdf21e24eb4d7cfacfc03cf4adb13",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 144,
"avg_line_length": 45.530141843971634,
"alnum_prop": 0.6411854044160599,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "5aa4cc31047c005c201ddebd0189f0186d924715",
"size": "26153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-graphrbac/azure/graphrbac/operations/applications_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""Tests for cmake database generation.
Attributes:
FlagsFile (TYPE): Description
"""
import imp
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import flags_file
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import search_scope
imp.reload(flags_file)
imp.reload(flag)
imp.reload(search_scope)
SearchScope = search_scope.TreeSearchScope
FlagsFile = flags_file.FlagsFile
Flag = flag.Flag
class TestFlagsFile(TestCase):
"""Test finding and generatgin flags from .clang_complete file.
Attributes:
view (TYPE): Description
"""
def test_init(self):
"""Initialization test."""
self.assertEqual(FlagsFile._FILE_NAME, '.clang_complete')
def test_load_file(self):
"""Test finding and loading existing file."""
test_file_path = path.join(
path.dirname(__file__), 'test_files', 'test.cpp')
flags_file = FlagsFile(['-I', '-isystem'])
flags = flags_file.get_flags(test_file_path)
# This flag only exists in .clang_complete to help us test that
# we can read the flag.
self.assertIn(Flag('', '-Wabi'), flags)
def test_fail_to_find(self):
"""Test failing to find a .clang_complete file."""
test_file_path = path.join(
path.dirname(__file__), 'test_files', 'test.cpp')
folder = path.dirname(test_file_path)
flags_file = FlagsFile(['-I', '-isystem'])
wrong_scope = SearchScope(from_folder=folder, to_folder=folder)
flags = flags_file.get_flags(test_file_path, wrong_scope)
self.assertIs(flags, None)
| {
"content_hash": "da935c4b27ec35ed40233b3eb904dd30",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 71,
"avg_line_length": 29.803571428571427,
"alnum_prop": 0.6602756141402037,
"repo_name": "niosus/EasyClangComplete",
"id": "37e1f2507548e8116a1cb0e23bdac4fb80de8e84",
"size": "1669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_clang_complete_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "469"
},
{
"name": "C++",
"bytes": "4969"
},
{
"name": "CMake",
"bytes": "1160"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Objective-C",
"bytes": "4185"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1615297"
},
{
"name": "Starlark",
"bytes": "105"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('quotes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='quote',
name='mentions',
field=models.ManyToManyField(blank=True, editable=False, related_name='mentioned_in', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='quote',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_of', to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "4e87b448e6f3c93aee00b53293c1bf87",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 136,
"avg_line_length": 31.26923076923077,
"alnum_prop": 0.6445264452644527,
"repo_name": "nivbend/memoir",
"id": "bce87dcc42ca0420240beb40c2e46738eb95f197",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quotes/migrations/0002_mentions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64"
},
{
"name": "HTML",
"bytes": "21058"
},
{
"name": "JavaScript",
"bytes": "4850"
},
{
"name": "Python",
"bytes": "62912"
}
],
"symlink_target": ""
} |
import argparse
import os
import os.path
import sqlite3
from . import zettel
# TODO: 'filename' and 'document' are special fields for stuff not really part of the YAML dictionary
# but still needing to be indexable and searchable.
ZettelSQLFields = zettel.ZettelFieldsOrdered
# Default Zettel DB name
ZDB = 'zettels.db'
def get_argparse():
parser = argparse.ArgumentParser()
parser.add_argument(
'--database', help="database name", required=True)
return parser
import pprint
printer = pprint.PrettyPrinter(indent=2)
def unquote(text):
return text.replace('"', '').replace("'", "")
class SQLiteFTS(object):
def __init__(self, db_name, field_names):
self.db_name = db_name
self.conn = sqlite3.connect(db_name)
self.conn.row_factory = sqlite3.Row
self.cursor = self.conn.cursor()
self.fts_field_names = field_names
# for sqlite insert template generation
self.fts_field_refs = ['?'] * len(self.fts_field_names)
self.fts_field_init = [''] * len(self.fts_field_names)
self.fts_fields = dict(zip(self.fts_field_names, self.fts_field_refs))
self.fts_default_record = dict(
zip(self.fts_field_names, self.fts_field_init))
self.zettel = None
def bind(self, zettel, filename, document=""):
self.zettel = zettel
doc = zettel.get_indexed_representation()
doc.update({'filename': filename})
doc.update({'document': document})
self.record = self.fts_default_record.copy()
        for k in doc.keys():
            if k in self.record.keys():
                if doc[k] is not None:
                    self.record[k] = doc[k]
            else:
                print("Unknown fts field %s - skipping it" % k)
# self.record.update(doc)
def drop_table(self):
self.cursor.execute("DROP TABLE IF EXISTS zettels")
self.conn.commit()
def create_table(self):
sql_fields = ",".join(self.fts_default_record.keys())
#print("CREATE VIRTUAL TABLE zettels USING fts4(%s)" % sql_fields)
self.cursor.execute(
"CREATE VIRTUAL TABLE zettels USING fts4(%s)" % sql_fields)
self.conn.commit()
self.create_index_table('tags', 'tag')
self.create_index_table('mentions', 'mention')
def create_index_table(self, table_name, field_name):
self.cursor.execute("DROP TABLE IF EXISTS %(table_name)s" % vars())
self.cursor.execute(
"CREATE TABLE %(table_name)s (%(field_name)s text)" % vars())
self.conn.commit()
def update_index(self, table_name, field_name, items):
if not items:
return
for item in items:
self.cursor.execute(
"INSERT INTO %(table_name)s (%(field_name)s) VALUES (?)" % vars(), (item,))
# NB: (item,) means to pack this item into a tuple as required by sqlite3.
def insert_into_table(self):
sql_params = ",".join(self.fts_fields.values())
sql_columns = ",".join(list(self.record.keys()))
sql_insert_values = list(self.record.values())
insert_sql = "INSERT INTO zettels (%s) VALUES (%s)" % (
sql_columns, sql_params)
self.cursor.execute(insert_sql, sql_insert_values)
self.conn.commit()
self.update_index('tags', 'tag', self.zettel.get_list_field('tags'))
self.update_index('mentions', 'mention',
self.zettel.get_list_field('mentions'))
    # A term_list is a list of 3-tuples (fieldname, not-operator, words),
    # matching the unpacking order used below.
def fts_search(self, term_list):
safe_term_list = []
for term in term_list:
            if isinstance(term, tuple) and len(term) == 3:
(name, not_operator, words) = term
words = unquote(words)
if not_operator not in '-':
not_operator = ''
if name not in self.fts_field_names:
continue
for word in words.split():
safe_term_list.append((name, ":", not_operator, word))
# print(safe_term_list)
fts_terms = " ".join(["".join(list(term)) for term in safe_term_list])
Q = "SELECT * from zettels where zettels match '%s'" % fts_terms
# print(Q)
for row in self.cursor.execute(Q):
yield(row)
def fts_query(self, prepared_sql):
return self.cursor.execute(prepared_sql)
def get_tags_generator(self):
Q = "select distinct(tag) from tags"
for row in self.cursor.execute(Q):
yield(row['tag'])
def get_tags_list(self):
gen = self.get_tags_generator()
return list(gen)
def get_mentions_generator(self):
Q = "select distinct(mention) from mentions"
for row in self.cursor.execute(Q):
yield(row['mention'])
def get_mentions_list(self):
gen = self.get_mentions_generator()
return list(gen)
def done(self):
self.conn.commit()
self.conn.close()
class FNF(Exception):
def __init__(self, text):
self.text = text
def __str__(self):
return "File not found: " + self.text
def get(db_name):
return SQLiteFTS(db_name, ZettelSQLFields)
GRAMMAR = """@@grammar::ZQUERY
start = expression $ ;
expression
=
| or_expr
| term
;
or_expr
=
left:expression op:'|' right:term
;
term
=
| and_expr
| factor
;
and_expr
= left:term op:and_op right:factor
;
and_op
= op:'&'
| op:'!'
;
not_expr
= left:term op:'!' right:factor
;
factor
=
| '(' @:expression ')'
| z_field
;
z_field
= field:literal ':' text:literal
;
literal
= word:/"(\s+|\w+)*"/
| word:/\w+/
;
"""
| {
"content_hash": "57b0998d31970a12f8ca0035b48182d3",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 101,
"avg_line_length": 27.903846153846153,
"alnum_prop": 0.5725361819434872,
"repo_name": "ZettelGeist/zettelgeist",
"id": "4b4fb3b934296c0780bff0c2ad6f5aa4a54784f1",
"size": "6031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zettelgeist/zdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "Jupyter Notebook",
"bytes": "29099"
},
{
"name": "Makefile",
"bytes": "638"
},
{
"name": "Python",
"bytes": "68716"
},
{
"name": "Ruby",
"bytes": "953"
},
{
"name": "Scala",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "560"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2010 Karl-Michael Schneider
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
IMG_INSTALLER_WELCOME = "1291928925203.png"
IMG_BUTTON_NEXT = "1275000336584.png"
IMG_BUTTON_NEXT_BLUE = "1270078045794.png"
IMG_BUTTON_NEXT_ORANGE = "1270078081419.png"
IMG_BUTTON_BACK = "1274999069194.png"
IMG_BUTTON_BACK_BLUE = "1292969863750.png"
IMG_BUTTON_BACK_ORANGE = "1292969902750.png"
IMG_BUTTON_CANCEL = "1273622286828.png"
IMG_BUTTON_CANCEL_BLUE = "1292969934359.png"
IMG_BUTTON_CANCEL_ORANGE = "1292969960937.png"
IMG_BUTTON_FINISH = "1292976192625.png"
IMG_BUTTON_FINISH_BLUE = "1270078348075.png"
IMG_BUTTON_FINISH_ORANGE = "1270079567059.png"
IMG_BUTTON_INSTALL = "1292976081437.png"
IMG_BUTTON_INSTALL_BLUE = "1292976114515.png"
IMG_BUTTON_INSTALL_ORANGE = "1270078125950.png"
IMG_BUTTON_YES = "1292970034109.png"
IMG_BUTTON_YES_BLUE = "1292970066078.png"
IMG_BUTTON_YES_ORANGE = "1292970087609.png"
IMG_BUTTON_NO = "1292970104187.png"
IMG_BUTTON_NO_BLUE = "1292970132328.png"
IMG_BUTTON_NO_ORANGE = "1292970156203.png"
IMG_DISABLED_BUTTON_BACK = "1274999264709.png"
IMG_DISABLED_BUTTON_NEXT = "1274999232413.png"
IMG_DISABLED_BUTTON_CANCEL = "1292976235218.png"
IMG_CHECKED_BOX = "1270078326700.png"
IMG_CHECKED_BOX_ORANGE = "1273627402328.png"
IMG_UNCHECKED_BOX = "1273627423984.png"
IMG_UNCHECKED_BOX_ORANGE = "1273627455171.png"
| {
"content_hash": "5eae7196332e62cf7016323b26f253cd",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 43.924528301886795,
"alnum_prop": 0.7865120274914089,
"repo_name": "karlmicha/rguils",
"id": "bcb1fef635c3bd6f87a0f168b1fa19c7384c4559",
"size": "2328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/sample/installerimages.sikuli/installerimages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "767"
},
{
"name": "Python",
"bytes": "128664"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('devilry_compressionutil', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='compressedarchivemeta',
name='backend_id',
field=models.CharField(blank=True, default='', max_length=100),
),
migrations.AddField(
model_name='compressedarchivemeta',
name='delete',
field=models.NullBooleanField(default=None),
),
]
| {
"content_hash": "09dd2ab6462ab52f08dca423a98f532e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 26.476190476190474,
"alnum_prop": 0.5881294964028777,
"repo_name": "devilry/devilry-django",
"id": "a03c4bb7d75fdf789925ab0e14e5ad0bc12add85",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/devilry_compressionutil/migrations/0002_auto_20170119_1202.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |
"""
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
p_path = self.dfs(root, p)[1]
q_path = self.dfs(root, q)[1]
lca = None
i = 0
while i < len(p_path) and i < len(q_path):
if p_path[i] != q_path[i]:
break
else:
lca = p_path[i]
i += 1
return lca
def dfs(self, root, target):
if root is None:
return False, None
if root == target:
return True, [root]
if root.left:
found, path = self.dfs(root.left, target)
if found:
return True, [root] + path
if root.right:
found, path = self.dfs(root.right, target)
if found:
return True, [root] + path
return False, None
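# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original solution): build the
# three-node tree
#         3
#        / \
#       5   1
# and confirm that the lowest common ancestor of nodes 5 and 1 is the root.
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(5)
    root.right = TreeNode(1)
    lca = Solution().lowestCommonAncestor(root, root.left, root.right)
    print(lca.val)  # expected output: 3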
| {
"content_hash": "625964f18d6cd66239e9f31064141ba7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 90,
"avg_line_length": 26.979591836734695,
"alnum_prop": 0.5075642965204236,
"repo_name": "ytjia/coding-practice",
"id": "5a953b93e56cb224e2e74ea7571c024123e02f12",
"size": "1389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "algorithms/python/leetcode/LowestCommonAncestorofaBinaryTree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3707"
},
{
"name": "C++",
"bytes": "12625"
},
{
"name": "Java",
"bytes": "10465"
},
{
"name": "Python",
"bytes": "14783"
}
],
"symlink_target": ""
} |
""" ymir.loom
factories for fabric commands
"""
import os
from fabric.colors import red
from fabric.contrib.console import confirm
def create_version_bump_cmd(pkg_name=None, version_delta=0.1, **kargs):
""" a factory for generating a 'version-bump' function,
which can be called from service fabfiles """
assert pkg_name is not None
verbose_name = kargs.pop('verbose_name', pkg_name)
def version_bump():
""" bump the version number for """ + verbose_name
sandbox = {}
version_file = os.path.join(pkg_name, 'version.py')
err = 'version file not found in expected location: ' + version_file
assert os.path.exists(version_file), err
# running "import pkg.version" should have no side-effects,
# so there's little point in parsing the file. just exec
execfile(version_file, sandbox)
current_version = sandbox['__version__']
new_version = current_version + version_delta
with open(version_file, 'r') as fhandle:
version_file_contents = [x for x in fhandle.readlines()
if x.strip()]
new_file = version_file_contents[:-1] + \
["__version__={0}".format(new_version)]
new_file = '\n'.join(new_file)
print red("warning:") + \
" version will be changed to {0}".format(new_version)
print
print red("new version file will look like this:\n")
print new_file
ans = confirm('proceed with version change?')
if not ans:
print 'aborting.'
return
with open(version_file, 'w') as fhandle:
fhandle.write(new_file)
print 'version has been rewritten.'
    # A bare '"""...""" + verbose_name' expression is not a docstring, so set
    # __doc__ explicitly to include the package name (e.g. for ``fab -l``).
    version_bump.__doc__ = "bump the version number for " + verbose_name
    return version_bump
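# Usage sketch (illustrative, not part of the original module): a service
# fabfile would typically expose the generated task like this. The package
# name 'myservice' is a placeholder; nothing touches the filesystem until the
# generated task itself is invoked (e.g. via ``fab version_bump``).
if __name__ == '__main__':
    version_bump = create_version_bump_cmd(pkg_name='myservice',
                                           version_delta=0.1)
    print(version_bump.__doc__)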
| {
"content_hash": "da36b103cf51e2e56106d1f5c9a6f525",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 39.24444444444445,
"alnum_prop": 0.5996602491506229,
"repo_name": "mattvonrocketstein/ymir",
"id": "5293cbfe0490ff9bda18712fbc23cdca8371fb0f",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ymir/loom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "330"
},
{
"name": "Puppet",
"bytes": "3363"
},
{
"name": "Python",
"bytes": "177403"
},
{
"name": "Ruby",
"bytes": "9827"
},
{
"name": "Shell",
"bytes": "31589"
}
],
"symlink_target": ""
} |
"""Tests for the conductor service."""
import mox
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
def stub_out_client_exceptions(self):
def passthru(exceptions, func, *args, **kwargs):
return func(*args, **kwargs)
self.stubs.Set(rpc_common, 'catch_client_exception', passthru)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
self.stub_out_client_exceptions()
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
self.assertEqual(jsonutils.to_primitive(migration),
self.conductor.migration_get(self.context,
migration['id']))
def test_migration_get_unconfirmed_by_dest_compute(self):
self.mox.StubOutWithMock(db,
'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
self.mox.ReplayAll()
self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_migration_create(self):
inst = {'uuid': 'fake-uuid',
'host': 'fake-host',
'node': 'fake-node'}
self.mox.StubOutWithMock(db, 'migration_create')
db.migration_create(self.context.elevated(),
{'instance_uuid': inst['uuid'],
'source_compute': inst['host'],
'source_node': inst['node'],
'fake-key': 'fake-value'}).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.migration_create(self.context, inst,
{'fake-key': 'fake-value'})
self.assertEqual(result, 'result')
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
migration_p = jsonutils.to_primitive(migration)
migration = self.conductor.migration_update(self.context, migration_p,
'finished')
self.assertEqual(migration['status'], 'finished')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertTrue(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertFalse(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get(self):
aggregate_ref = self._setup_aggregate_with_host()
aggregate = self.conductor.aggregate_get(self.context,
aggregate_ref['id'])
self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get_by_host(self):
self._setup_aggregate_with_host()
aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
self.assertEqual(aggregates[0]['availability_zone'], 'foo')
def test_aggregate_metadata_add(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
metadata = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_add(
mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
metadata)
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_add(self.context,
aggregate,
metadata)
self.assertEqual(result, metadata)
def test_aggregate_metadata_delete(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_delete(self.context,
aggregate,
'fake')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args)
self.assertEqual(result, 'foo')
def test_security_group_get_by_instance(self):
fake_instance = {'id': 'fake-instance'}
self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
db.security_group_get_by_instance(
self.context, fake_instance['id']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_get_by_instance(self.context,
fake_instance)
self.assertEqual(result, 'it worked')
def test_security_group_rule_get_by_security_group(self):
fake_secgroup = {'id': 'fake-secgroup'}
self.mox.StubOutWithMock(db,
'security_group_rule_get_by_security_group')
db.security_group_rule_get_by_security_group(
self.context, fake_secgroup['id']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_rule_get_by_security_group(
self.context, fake_secgroup)
self.assertEqual(result, 'it worked')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst)
self.assertEqual(result, 'fake-result')
def test_instance_get_all_hung_in_rebooting(self):
self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
db.instance_get_all_hung_in_rebooting(self.context, 123)
self.mox.ReplayAll()
self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})
def test_instance_info_cache_delete(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
db.instance_info_cache_delete(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
def test_instance_info_cache_update(self):
fake_values = {'key1': 'val1', 'key2': 'val2'}
fake_instance = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_info_cache_update(self.context, 'fake-uuid',
fake_values,
update_cells='meow')
self.mox.ReplayAll()
self.conductor.instance_info_cache_update(self.context,
fake_instance,
fake_values,
update_cells='meow')
def test_instance_type_get(self):
self.mox.StubOutWithMock(db, 'instance_type_get')
db.instance_type_get(self.context, 'fake-id').AndReturn('fake-type')
self.mox.ReplayAll()
result = self.conductor.instance_type_get(self.context, 'fake-id')
self.assertEqual(result, 'fake-type')
def test_vol_get_usage_by_time(self):
self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
'fake-usage')
self.mox.ReplayAll()
result = self.conductor.vol_get_usage_by_time(self.context,
'fake-time')
self.assertEqual(result, 'fake-usage')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
inst = self._create_fake_instance({
'project_id': 'fake-project_id',
'user_id': 'fake-user_id',
})
db.vol_usage_update(self.context, 'fake-vol', 'rd-req', 'rd-bytes',
'wr-req', 'wr-bytes', inst['uuid'],
'fake-project_id', 'fake-user_id', 'fake-az',
'fake-refr', 'fake-bool')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol', 'rd-req',
'rd-bytes', 'wr-req', 'wr-bytes',
inst, 'fake-refr', 'fake-bool')
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], 'fake-values',
False).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
'fake-values', False)
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertEqual(result, None)
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host')
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': {},
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
compute_utils.notify_about_instance_usage(self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_quota_commit(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
quota.QUOTAS.commit(self.context, 'reservations', project_id=None)
quota.QUOTAS.commit(self.context, 'reservations', project_id='proj')
self.mox.ReplayAll()
self.conductor.quota_commit(self.context, 'reservations')
self.conductor.quota_commit(self.context, 'reservations', 'proj')
def test_quota_rollback(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.rollback(self.context, 'reservations', project_id=None)
quota.QUOTAS.rollback(self.context, 'reservations', project_id='proj')
self.mox.ReplayAll()
self.conductor.quota_rollback(self.context, 'reservations')
self.conductor.quota_rollback(self.context, 'reservations', 'proj')
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
def test_compute_stop(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api, 'stop')
self.conductor_manager.compute_api.stop(self.context, 'instance', True)
self.mox.ReplayAll()
self.conductor.compute_stop(self.context, 'instance')
def test_compute_confirm_resize(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, 'instance',
'migration')
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, fake_bdm)
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
db.block_device_mapping_update_or_create(self.context, fake_bdm)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm'}
fake_bdm2 = {'id': 'fake-bdm-2'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
[fake_bdm,
fake_bdm2])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host')
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node')
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(rpc_common.ClientException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.stub_out_client_exceptions()
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, fake_bdm)
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
db.block_device_mapping_update_or_create(self.context, fake_bdm)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
bdms=[fake_bdm])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, 'fake-bdm')
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
self.conductor.block_device_mapping_destroy_by_instance_and_device(
self.context, fake_inst, 'fake-device')
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
def test_instance_get_all(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all(self.context)
db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
'updated_at', 'asc',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all(self.context)
self.conductor.instance_get_all_by_filters(self.context,
{'name': 'fake-inst'},
'updated_at', 'asc')
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
# TODO(russellb) This is a hack ... SetUp() starts the conductor()
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host')
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise rpc_common.Timeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertTrue(None in timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertTrue(isinstance(conductor.API(),
conductor_api.LocalAPI))
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertTrue(isinstance(conductor.API(),
conductor_api.API))
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertTrue(isinstance(conductor.API(use_local=True),
conductor_api.LocalAPI))
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
| {
"content_hash": "1a0f806e11d9ef2cb220dee443e3bd30",
"timestamp": "",
"source": "github",
"line_count": 1128,
"max_line_length": 79,
"avg_line_length": 46.71719858156028,
"alnum_prop": 0.5420801943184621,
"repo_name": "sridevikoushik31/nova",
"id": "5044342f9f7df8059cbe53ed70f6573059788bf1",
"size": "53302",
"binary": false,
"copies": "1",
"ref": "refs/heads/port_id_in_vif_on_devide",
"path": "nova/tests/conductor/test_conductor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9944606"
},
{
"name": "Ruby",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 ManerFan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygame
from enum import Enum
WIDTH = 24
HEIGHT = 18
CELL_SIZE = 40
WINDOW_WIDTH = WIDTH * CELL_SIZE
WINDOW_HEIGHT = HEIGHT * CELL_SIZE
screen = None
font = None
class COLOR(Enum):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
DARK_GREEN = (0, 155, 0)
DARK_GRAY = (40, 40, 40)
BG_COLOR = (0, 0, 0)
def init(title, width=24, height=18):
"""
    Initialize the canvas and the display window.
"""
global screen, font, WIDTH, HEIGHT, WINDOW_WIDTH, WINDOW_HEIGHT
WIDTH = width
HEIGHT = height
WINDOW_WIDTH = WIDTH * CELL_SIZE
WINDOW_HEIGHT = HEIGHT * CELL_SIZE
pygame.init()
pygame.display.set_caption(title)
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
font = pygame.font.Font('freesansbold.ttf', 18)
def update():
pygame.display.update()
def draw_bg():
screen.fill(COLOR.BG_COLOR.value)
def draw_grid():
"""
    Draw the grid lines on the canvas.
"""
for x in range(0, WINDOW_WIDTH, CELL_SIZE):
pygame.draw.line(screen, COLOR.DARK_GRAY.value, (x, 0), (x, WINDOW_HEIGHT))
for y in range(0, WINDOW_HEIGHT, CELL_SIZE):
pygame.draw.line(screen, COLOR.DARK_GRAY.value, (0, y), (WINDOW_WIDTH, y))
update()
def draw_cell_line(cells, cell_color=COLOR.GREEN.value, line_color=COLOR.DARK_GREEN.value):
"""
    Draw the given cells and connect consecutive cells with straight lines.
    :param cells: cell positions in grid coordinates
    :param cell_color: fill color of each cell
    :param line_color: color of the connecting lines
    :return:
"""
if len(cells) < 1:
return
draw_cells(cells, cell_color)
for i in range(1, len(cells)):
(x1, y1) = cells[i - 1]
(x2, y2) = cells[i]
pygame.draw.line(screen, line_color,
((x1 + .5) * CELL_SIZE, (y1 + .5) * CELL_SIZE),
((x2 + .5) * CELL_SIZE, (y2 + .5) * CELL_SIZE),
6)
def draw_cells(cells, color=COLOR.GREEN.value):
"""
    Draw a collection of cells.
    :param cells: cell positions in grid coordinates
    :param color: cell color
"""
for cell in cells:
draw_cell(cell, color)
def draw_cell(cell, color=COLOR.GREEN.value):
"""
    Draw a single cell.
    :param cell: cell position in grid coordinates
    :param color: cell color
"""
(x, y) = cell
x = x * CELL_SIZE
y = y * CELL_SIZE
outer_rect = pygame.Rect(x, y, CELL_SIZE, CELL_SIZE)
padding_rect = pygame.Rect(x + 2, y + 2, CELL_SIZE - 4, CELL_SIZE - 4)
inner_rect = pygame.Rect(x + 4, y + 4, CELL_SIZE - 8, CELL_SIZE - 8)
pygame.draw.rect(screen, color, outer_rect)
pygame.draw.rect(screen, COLOR.BG_COLOR.value, padding_rect)
pygame.draw.rect(screen, color, inner_rect)
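# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module; it needs a
# display to run): open the window, draw the grid plus a short connected
# "snake" of cells, show the result briefly, then quit. Coordinates are grid
# positions, not pixels.
if __name__ == '__main__':
    import time
    init('canvas demo', width=24, height=18)
    draw_bg()
    draw_grid()
    draw_cell_line([(3, 3), (4, 3), (5, 3), (5, 4)])
    update()
    time.sleep(2)
    pygame.quit()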
| {
"content_hash": "12a7453375987d2549b3741929e0eebf",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 25.224,
"alnum_prop": 0.6089438629876308,
"repo_name": "manerfan/python-game-snake",
"id": "fef50bf0989cb38b4b1639616247fc77dd4bfc6f",
"size": "3291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/graph/canvas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24897"
}
],
"symlink_target": ""
} |