max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M)
---|---|---|---|---
insights/parsers/tests/test_zipl_conf.py | lhuett/insights-core | 121 | 14053
from insights.parsers.zipl_conf import ZiplConf
from insights.tests import context_wrap
from insights.parsers import ParseException
import pytest
ZIPL_CONF = """
[defaultboot]
defaultauto
prompt=1
timeout=5
default=linux
target=/boot
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
[linux-0-rescue-a27932c8d57248e390cee3798bbd3709]
image=/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709
ramdisk=/boot/initramfs-0-rescue-a27932c8d57248e390cee3798bbd3709.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0"
[other]
image=/boot/vmlinuz
ramdisk=/boot/initramfs.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100
# Configuration for dumping to SCSI disk
# Separate IPL and dump partitions
[dumpscsi]
target=/boot
dumptofs=/dev/sda2
parameters="dump_dir=/mydumps dump_compress=none dump_mode=auto"
# Menu containing two DASD boot configurations
:menu1
1=linux
2=linux-0-rescue-a27932c8d57248e390cee3798bbd3709
default=1
prompt=1
timeout=30
""".strip()
ZIPL_CONF_INVALID = """
prompt=1
timeout=5
default=linux
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
""".strip()
def test_zipl_conf():
res = ZiplConf(context_wrap(ZIPL_CONF))
assert res.get('linux').get('image') == "/boot/vmlinuz-3.10.0-693.el7.s390x"
assert res['linux']['image'] == "/boot/vmlinuz-3.10.0-693.el7.s390x"
assert res[':menu1']['1'] == 'linux'
assert 'defaultauto' in res['defaultboot']
assert res['defaultboot']['defaultauto'] is True
assert res['other']['parameters'] == '"root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100'
assert res.images == {
'linux': '/boot/vmlinuz-3.10.0-693.el7.s390x',
'linux-0-rescue-a27932c8d57248e390cee3798bbd3709': '/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709',
'other': '/boot/vmlinuz'
}
assert res.dumptofses == {'dumpscsi': '/dev/sda2'}
def test_zipl_conf_invalid():
with pytest.raises(ParseException) as pe:
ZiplConf(context_wrap(ZIPL_CONF_INVALID))
assert "Invalid zipl configuration file is found." in str(pe)
pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 2,628 | 14060
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.core import local
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
class TestGeneratorDbSession(unittest.TestCase):
def setUp(self):
db = Database()
class Account(db.Entity):
id = PrimaryKey(int)
amount = Required(int)
setup_database(db)
self.db = db
self.Account = Account
with db_session:
a1 = Account(id=1, amount=1000)
a2 = Account(id=2, amount=2000)
a3 = Account(id=3, amount=3000)
def tearDown(self):
teardown_database(self.db)
assert local.db_session is None
self.db = self.Account = None
@raises_exception(TypeError, 'db_session with `retry` option cannot be applied to generator function')
def test1(self):
@db_session(retry=3)
def f(): yield
@raises_exception(TypeError, 'db_session with `ddl` option cannot be applied to generator function')
def test2(self):
@db_session(ddl=True)
def f(): yield
@raises_exception(TypeError, 'db_session with `serializable` option cannot be applied to generator function')
def test3(self):
@db_session(serializable=True)
def f(): yield
def test4(self):
@db_session(immediate=True)
def f(): yield
@raises_exception(TransactionError, '@db_session-wrapped generator cannot be used inside another db_session')
def test5(self):
@db_session
def f(): yield
with db_session:
next(f())
def test6(self):
@db_session
def f():
x = local.db_session
self.assertTrue(x is not None)
yield self.db._get_cache()
self.assertEqual(local.db_session, x)
a1 = self.Account[1]
yield a1.amount
self.assertEqual(local.db_session, x)
a2 = self.Account[2]
yield a2.amount
gen = f()
cache = next(gen)
self.assertTrue(cache.is_alive)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 1000)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 2000)
self.assertEqual(local.db_session, None)
try: next(gen)
except StopIteration:
self.assertFalse(cache.is_alive)
else:
self.fail()
def test7(self):
@db_session
def f(id1):
a1 = self.Account[id1]
id2 = yield a1.amount
a2 = self.Account[id2]
amount = yield a2.amount
a1.amount -= amount
a2.amount += amount
commit()
gen = f(1)
amount1 = next(gen)
self.assertEqual(amount1, 1000)
amount2 = gen.send(2)
self.assertEqual(amount2, 2000)
try:
gen.send(100)
except StopIteration:
pass
else:
self.fail()
with db_session:
a1 = self.Account[1]
self.assertEqual(a1.amount, 900)
a2 = self.Account[2]
self.assertEqual(a2.amount, 2100)
@raises_exception(TransactionError, 'You need to manually commit() changes before suspending the generator')
def test8(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
yield a1.amount
for amount in f(1):
pass
def test9(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
commit()
yield a1.amount
for amount in f(1):
pass
def test10(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
a1.amount += 100
with db_session:
a = self.Account[1].amount
for amount in f(1):
pass
with db_session:
b = self.Account[1].amount
self.assertEqual(b, a + 100)
def test12(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.close()
@raises_exception(TypeError, 'error message')
def test13(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.throw(TypeError('error message'))
if __name__ == '__main__':
unittest.main()
h/security/predicates.py | hypothesis/h | 2,103 | 14085
"""
Define authorization predicates.
These are functions which accept an `Identity` object and a context object and
return a truthy value. These represent building blocks of our permission map
which define when people do, or don't, have permissions.
For example, a predicate might define "group_created_by_user", which is only
true when a user is present, a group is present, and the user created that
group.
"""
from itertools import chain
from h.models.group import JoinableBy, ReadableBy, WriteableBy
def requires(*parent_predicates):
"""
Decorate a predicate to say it requires other predicates to be True first.
:param parent_predicates: A list of predicates that have to be true for
this predicate to be true as well.
"""
def decorator(function):
function.requires = parent_predicates
return function
return decorator
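# A minimal, hedged illustration of the decorator above. The two predicates below
# are hypothetical examples only; they are not part of h's real permission map.
def _example_context_has_flag(_identity, context):
    return getattr(context, "flag", None)
@requires(_example_context_has_flag)
def _example_flag_is_enabled(_identity, context):
    # Once resolve_predicates() (defined at the bottom of this module) has expanded
    # the permission map, the parent predicate runs first, so context.flag is known
    # to be present and truthy here.
    return context.flag == "enabled"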
# Identity things
def authenticated(identity, _context):
return identity
# The `@requires` here means that this predicate needs `authenticated` to be
# True before it's True. It also avoids attribute errors if identity is None
@requires(authenticated)
def authenticated_user(identity, _context):
return identity.user
@requires(authenticated_user)
def user_is_staff(identity, _context):
return identity.user.staff
@requires(authenticated_user)
def user_is_admin(identity, _context):
return identity.user.admin
@requires(authenticated)
def authenticated_client(identity, _context):
return identity.auth_client
@requires(authenticated_client)
def authenticated_client_is_lms(identity, _context):
authority = identity.auth_client.authority
return authority.startswith("lms.") and authority.endswith(".hypothes.is")
# Users
def user_found(_identity, context):
return hasattr(context, "user") and context.user
@requires(authenticated_client, user_found)
def user_authority_matches_authenticated_client(identity, context):
return context.user.authority == identity.auth_client.authority
# Annotations
def annotation_found(_identity, context):
return hasattr(context, "annotation") and context.annotation
@requires(annotation_found)
def annotation_shared(_identity, context):
return context.annotation.shared
@requires(annotation_found)
def annotation_not_shared(_identity, context):
return not context.annotation.shared
@requires(annotation_found)
def annotation_live(_identity, context):
return not context.annotation.deleted
@requires(authenticated_user, annotation_found)
def annotation_created_by_user(identity, context):
return identity.user.userid == context.annotation.userid
# Groups
def group_found(_identity, context):
return hasattr(context, "group") and context.group
def group_not_found(_identity, context):
return not hasattr(context, "group") or not context.group
@requires(group_found)
def group_writable_by_members(_identity, context):
return context.group.writeable_by == WriteableBy.members
@requires(group_found)
def group_writable_by_authority(_identity, context):
return context.group.writeable_by == WriteableBy.authority
@requires(group_found)
def group_readable_by_world(_identity, context):
return context.group.readable_by == ReadableBy.world
@requires(group_found)
def group_readable_by_members(_identity, context):
return context.group.readable_by == ReadableBy.members
@requires(group_found)
def group_joinable_by_authority(_identity, context):
return context.group.joinable_by == JoinableBy.authority
@requires(authenticated_user, group_found)
def group_created_by_user(identity, context):
return context.group.creator and context.group.creator.id == identity.user.id
@requires(authenticated_user, group_found)
def group_has_user_as_member(identity, context):
# With detached groups like we have with the websocket, this doesn't work
# as SQLAlchemy does not consider them equal:
# return context.group in identity.user.groups
return any(user_group.id == context.group.id for user_group in identity.user.groups)
@requires(authenticated_user, group_found)
def group_matches_user_authority(identity, context):
return context.group.authority == identity.user.authority
@requires(authenticated_client, group_found)
def group_matches_authenticated_client_authority(identity, context):
return context.group.authority == identity.auth_client.authority
def resolve_predicates(mapping):
"""
Expand predicates with requirements into concrete lists of predicates.
This takes a permission map which contains predicates which reference
other ones (using `@requires`), and converts each clause to include the
parents in parent-first order. This means any parent which is referred to
by a predicate is executed before it, and no predicate appears more than once.
"""
return {
key: [_expand_clause(clause) for clause in clauses]
for key, clauses in mapping.items()
}
def _expand_clause(clause):
"""Generate all of the predicates + parents in a clause without dupes."""
seen_before = set()
# The chain.from_iterable here flattens nested iterables
return list(
chain.from_iterable(
_expand_predicate(predicate, seen_before) for predicate in clause
)
)
def _expand_predicate(predicate, seen_before):
"""Generate all of the parents and the predicate in parents first order."""
if hasattr(predicate, "requires"):
for parent in predicate.requires:
yield from _expand_predicate(parent, seen_before)
if predicate not in seen_before:
seen_before.add(predicate)
yield predicate
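# A hedged, self-contained sketch of how resolve_predicates expands a clause. The
# permission key "example.permission" is hypothetical; the predicates are the real
# ones defined above.
def _example_resolution():
    resolved = resolve_predicates({"example.permission": [(user_is_staff,)]})
    # user_is_staff requires authenticated_user, which requires authenticated, so
    # the single clause is expanded parents-first, with no duplicates:
    assert resolved["example.permission"] == [
        [authenticated, authenticated_user, user_is_staff]
    ]
    return resolved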
tests/test_reusable_executor.py | hoodmane/loky | 153 | 14088
import os
import sys
import gc
import ctypes
import psutil
import pytest
import warnings
import threading
from time import sleep
from multiprocessing import util, current_process
from pickle import PicklingError, UnpicklingError
from distutils.version import LooseVersion
import loky
from loky import cpu_count
from loky import get_reusable_executor
from loky.process_executor import _RemoteTraceback, TerminatedWorkerError
from loky.process_executor import BrokenProcessPool, ShutdownExecutorError
from loky.reusable_executor import _ReusablePoolExecutor
import cloudpickle
from ._executor_mixin import ReusableExecutorMixin
from .utils import TimingWrapper, id_sleep, check_python_subprocess_call
from .utils import filter_match
cloudpickle_version = LooseVersion(cloudpickle.__version__)
# Compat windows
if sys.platform == "win32":
from signal import SIGTERM as SIGKILL
libc = ctypes.cdll.msvcrt
else:
from signal import SIGKILL
from ctypes.util import find_library
libc = ctypes.CDLL(find_library("c"))
try:
import numpy as np
except ImportError:
np = None
# Backward compat for python2 cPickle module
PICKLING_ERRORS = (PicklingError,)
try:
import cPickle
PICKLING_ERRORS += (cPickle.PicklingError,)
except ImportError:
pass
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if hasattr(mod, reg):
getattr(mod, reg).clear()
def wait_dead(worker, n_tries=1000, delay=0.001):
"""Wait for process pid to die"""
for i in range(n_tries):
if worker.exitcode is not None:
return
sleep(delay)
raise RuntimeError("Process %d failed to die for at least %0.3fs" %
(worker.pid, delay * n_tries))
def crash():
"""Induces a segfault"""
import faulthandler
faulthandler._sigsegv()
def exit():
"""Induces a sys exit with exitcode 0"""
sys.exit(0)
def c_exit(exitcode=0):
"""Induces a libc exit with exitcode 0"""
libc.exit(exitcode)
def sleep_then_check_pids_exist(arg):
"""Sleep for some time and the check if all the passed pids exist"""
time, pids = arg
sleep(time)
res = True
for p in pids:
res &= psutil.pid_exists(p)
return res
def kill_friend(pid, delay=0):
"""Function that send SIGKILL at process pid"""
sleep(delay)
try:
os.kill(pid, SIGKILL)
except (PermissionError, ProcessLookupError) as e:
if psutil.pid_exists(pid):
util.debug("Fail to kill an alive process?!?")
raise e
util.debug("process {} was already dead".format(pid))
def raise_error(etype=UnpicklingError, message=None):
"""Function that raises an Exception in process"""
raise etype(message)
def return_instance(cls):
"""Function that returns a instance of cls"""
return cls()
class SayWhenError(ValueError):
pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
def do_nothing(arg):
"""Function that return True, test passing argument"""
return True
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return exit, ()
class CExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
c_exit()
class CExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return c_exit, ()
class ErrorAtPickle(object):
"""Bad object that raises an error at pickling time."""
def __init__(self, fail=True):
self.fail = fail
def __reduce__(self):
if self.fail:
raise PicklingError("Error in pickle")
else:
return id, (42, )
class ErrorAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __init__(self, etype=UnpicklingError, message='the error message'):
self.etype = etype
self.message = message
def __reduce__(self):
return raise_error, (self.etype, self.message)
class CrashAtGCInWorker(object):
"""Bad object that triggers a segfault at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
crash()
class CExitAtGCInWorker(object):
"""Exit worker at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
c_exit()
class TestExecutorDeadLock(ReusableExecutorMixin):
crash_cases = [
# Check problems occurring while pickling a task
(id, (ExitAtPickle(),), PicklingError, None),
(id, (ErrorAtPickle(),), PicklingError, None),
# Check problems occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool, r"SystemExit"),
(id, (CExitAtUnpickle(),), TerminatedWorkerError, r"EXIT\(0\)"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool, r"UnpicklingError"),
(id, (CrashAtUnpickle(),), TerminatedWorkerError, r"SIGSEGV"),
# Check problems occurring during function execution on workers
(crash, (), TerminatedWorkerError, r"SIGSEGV"),
(exit, (), SystemExit, None),
(c_exit, (), TerminatedWorkerError, r"EXIT\(0\)"),
(raise_error, (RuntimeError,), RuntimeError, None),
# Check problems occurring while pickling a task result
# on workers
(return_instance, (CrashAtPickle,), TerminatedWorkerError, r"SIGSEGV"),
(return_instance, (ExitAtPickle,), SystemExit, None),
(return_instance, (CExitAtPickle,), TerminatedWorkerError,
r"EXIT\(0\)"),
(return_instance, (ErrorAtPickle,), PicklingError, None),
# Check problems occurring while unpickling a task in
# the result_handler thread
(return_instance, (ExitAtUnpickle,), BrokenProcessPool, r"SystemExit"),
(return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
r"UnpicklingError"),
]
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_crashes(self, func, args, expected_err, match):
"""Test various reusable_executor crash handling"""
executor = get_reusable_executor(max_workers=2)
res = executor.submit(func, *args)
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
res.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_in_callback_submit_with_crash(self, func, args, expected_err,
match):
"""Test the recovery from callback crash"""
executor = get_reusable_executor(max_workers=2, timeout=12)
def in_callback_submit(future):
future2 = get_reusable_executor(
max_workers=2, timeout=12).submit(func, *args)
# Store the future of the job submitted in the callback to make it
# easy to introspect.
future.callback_future = future2
future.callback_done.set()
# Make sure the first submitted job lasts a bit, so that the callback
# is called in the queue manager thread and not immediately in the
# main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.callback_done = threading.Event()
f.add_done_callback(in_callback_submit)
assert f.result() == 42
if not f.callback_done.wait(timeout=3):
raise AssertionError('callback not done before timeout')
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
f.callback_future.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
def test_callback_crash_on_submit(self):
"""Errors in the callback execution directly in queue manager thread.
This case can break the process executor and we want to make sure
that we can detect the issue and recover by calling
get_reusable_executor.
"""
executor = get_reusable_executor(max_workers=2)
# Make sure the first submitted job lasts a bit, so that the callback
# is called in the queue manager thread and not immediately in the
# main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: exit())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.1).result() == 42
executor = get_reusable_executor(max_workers=2)
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: raise_error())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.).result() == 42
def test_deadlock_kill(self):
"""Test deadlock recovery for reusable_executor"""
executor = get_reusable_executor(max_workers=1, timeout=None)
# trigger the spawning of the worker process
executor.submit(sleep, 0.1)
worker = next(iter(executor._processes.values()))
with pytest.warns(UserWarning) as recorded_warnings:
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
os.kill(worker.pid, SIGKILL)
wait_dead(worker)
# wait for the executor to be able to detect the issue and set itself
# in broken state:
sleep(.5)
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
executor.submit(id_sleep, 42, 0.1).result()
# the get_reusable_executor factory should be able to create a new
# working instance
executor = get_reusable_executor(max_workers=2, timeout=None)
assert executor.submit(id_sleep, 42, 0.).result() == 42
@pytest.mark.parametrize("n_proc", [1, 2, 5, 13])
def test_crash_races(self, n_proc):
"""Test the race conditions in reusable_executor crash handling"""
if (sys.platform == 'win32' and sys.version_info >= (3, 8)
and n_proc > 5):
pytest.skip(
"On win32, the paging size can be too small to import numpy "
"multiple times in the sub-processes (imported when loading "
"this file). Skipping while no better solution is found. See "
"https://github.com/joblib/loky/issues/279 for more details."
)
# Test for an external crash signal coming from a neighbor process,
# with various race setups
executor = get_reusable_executor(max_workers=n_proc, timeout=None)
executor.map(id, range(n_proc)) # trigger the creation of the workers
pids = list(executor._processes.keys())
assert len(pids) == n_proc
assert None not in pids
res = executor.map(sleep_then_check_pids_exist,
[(.0001 * (j // 2), pids)
for j in range(2 * n_proc)])
assert all(list(res))
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
res = executor.map(kill_friend, pids[::-1])
list(res)
def test_imap_handle_iterable_exception(self):
# How errors raised by the map/imap input generator are caught
# depends on the Python build/version
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(10, 3),
chunksize=1)
# SayWhenError seen at start of problematic chunk's results
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=2)
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=4)
def test_queue_full_deadlock(self):
executor = get_reusable_executor(max_workers=1)
fs_fail = [executor.submit(do_nothing, ErrorAtPickle(True))
for i in range(100)]
fs = [executor.submit(do_nothing, ErrorAtPickle(False))
for i in range(100)]
with pytest.raises(PicklingError):
fs_fail[99].result()
assert fs[99].result()
def test_informative_error_when_fail_at_unpickle(self):
executor = get_reusable_executor(max_workers=2)
obj = ErrorAtUnpickle(RuntimeError, 'message raised in child')
f = executor.submit(id, obj)
with pytest.raises(BrokenProcessPool) as exc_info:
f.result()
assert 'RuntimeError' in str(exc_info.value.__cause__)
assert 'message raised in child' in str(exc_info.value.__cause__)
@pytest.mark.skipif(np is None, reason="requires numpy")
def test_numpy_dot_parent_and_child_no_freeze(self):
"""Test that no freeze happens in child process when numpy's thread
pool is started in the parent.
"""
a = np.random.randn(1000, 1000)
np.dot(a, a) # trigger the thread pool init in the parent process
executor = get_reusable_executor(max_workers=2)
executor.submit(np.dot, a, a).result()
executor.shutdown(wait=True)
class TestTerminateExecutor(ReusableExecutorMixin):
def test_shutdown_kill(self):
"""Test reusable_executor termination handling"""
from itertools import repeat
executor = get_reusable_executor(max_workers=5)
res1 = executor.map(id_sleep, range(100), repeat(.001))
res2 = executor.map(id_sleep, range(100), repeat(1))
assert list(res1) == list(range(100))
shutdown = TimingWrapper(executor.shutdown)
shutdown(wait=True, kill_workers=True)
assert shutdown.elapsed < 5
# We should get an error as the executor shut down before we fetched
# all the results from the long-running operation.
with pytest.raises(ShutdownExecutorError):
list(res2)
def test_shutdown_deadlock(self):
"""Test recovery if killed after resize call"""
# Test that the executor.shutdown call does not cause a deadlock
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # start the worker processes
executor.submit(kill_friend, (next(iter(executor._processes.keys())),
.0))
sleep(.01)
executor.shutdown(wait=True)
def test_kill_workers_on_new_options(self):
# submit a long running job with no timeout
executor = get_reusable_executor(max_workers=2, timeout=None)
f = executor.submit(sleep, 10000)
# change the constructor parameter while requesting not to wait
# for the long running task to complete (the workers will get
# shutdown forcibly)
executor = get_reusable_executor(max_workers=2, timeout=5,
kill_workers=True)
with pytest.raises(ShutdownExecutorError):
f.result()
f2 = executor.submit(id_sleep, 42, 0)
assert f2.result() == 42
@pytest.mark.parametrize("bad_object, match", [
(CrashAtGCInWorker, r"SIGSEGV"), (CExitAtGCInWorker, r"EXIT\(0\)")])
def test_call_item_gc_crash_or_exit(self, bad_object, match):
executor = get_reusable_executor(max_workers=1)
bad_object = bad_object()
f = executor.submit(id, bad_object)
# The worker will successfully send back its result to the master
# process before crashing so this future can always be collected:
assert f.result() is not None
# The executor should automatically detect that the worker has crashed
# when processing subsequently dispatched tasks:
with pytest.raises(TerminatedWorkerError, match=filter_match(match)):
executor.submit(gc.collect).result()
for r in executor.map(sleep, [.1] * 100):
pass
class TestResizeExecutor(ReusableExecutorMixin):
def test_reusable_executor_resize(self):
"""Test reusable_executor resizing"""
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2))
# Shrinking the executor should drop a single process and keep one of
# the old ones, as it is still in good shape. The resize should not
# occur while there are ongoing jobs.
pids = list(executor._processes.keys())
res1 = executor.submit(sleep_then_check_pids_exist, (.3, pids))
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(w) == 1
expected_msg = "Trying to resize an executor with running jobs"
assert expected_msg in str(w[0].message)
assert res1.result(), ("Resize should wait for current processes "
" to finish")
assert len(executor._processes) == 1
assert next(iter(executor._processes.keys())) in pids
# Requesting the same number of processes should not impact the executor
# nor kill the processes
old_pid = next(iter((executor._processes.keys())))
unchanged_executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(unchanged_executor._processes) == 1
assert unchanged_executor is executor
assert next(iter(unchanged_executor._processes.keys())) == old_pid
# Growing the executor again should add a single process and keep the
# old one, as it is still in good shape
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(executor._processes) == 2
assert old_pid in list(executor._processes.keys())
@pytest.mark.parametrize("reuse", [True, False])
@pytest.mark.parametrize("kill_workers", [True, False])
def test_reusable_executor_resize_many_times(self, kill_workers, reuse):
# Tentative non-regression test for a deadlock when shutting down
# the workers of an executor prior to resizing it.
kwargs = {
'timeout': None,
'kill_workers': kill_workers,
'reuse': reuse,
}
with warnings.catch_warnings(record=True):
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
for size in [12, 2, 1, 12, 6, 1, 8, 5]:
executor = get_reusable_executor(max_workers=size, **kwargs)
executor.map(sleep, [0.01] * 6)
# Do not wait for the tasks to complete.
executor.shutdown()
def test_kill_after_resize_call(self):
"""Test recovery if killed after resize call"""
# Test the executor resizing being called before a kill arrives
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # trigger the creation of worker processes
pid = next(iter(executor._processes.keys()))
executor.submit(kill_friend, (pid, .1))
with pytest.warns(UserWarning) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
assert executor.submit(id_sleep, 42, 0.).result() == 42
executor.shutdown()
def test_resize_after_timeout(self):
with warnings.catch_warnings(record=True) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=8, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
if len(recorded_warnings) > 1:
expected_msg = 'A worker stopped'
assert expected_msg in recorded_warnings[0].message.args[0]
class TestGetReusableExecutor(ReusableExecutorMixin):
def test_invalid_process_number(self):
"""Raise error on invalid process number"""
with pytest.raises(ValueError):
get_reusable_executor(max_workers=0)
with pytest.raises(ValueError):
get_reusable_executor(max_workers=-1)
executor = get_reusable_executor()
with pytest.raises(ValueError):
executor._resize(max_workers=None)
@pytest.mark.skipif(sys.platform == "win32", reason="No fork on windows")
@pytest.mark.skipif(sys.version_info <= (3, 4),
reason="No context before 3.4")
def test_invalid_context(self):
"""Raise error on invalid context"""
with pytest.warns(UserWarning):
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context="fork")
def test_pass_start_method_name_as_context(self):
executor = get_reusable_executor(max_workers=2, context='loky')
assert executor.submit(id, 42).result() >= 0
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context='bad_start_method')
def test_interactively_defined_executor_no_main(self):
# Check that the init_main_module parameter works properly.
# When using the -c option, we don't need the `if __name__ ...` safeguard,
# and can thus test LokyProcess without the extra argument. For running
# a script, it is necessary to use init_main_module=False.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor()
e.submit(id, 42).result()
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_reused_flag(self):
executor, _ = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert reused
executor.shutdown(kill_workers=True)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert not reused
@pytest.mark.xfail(cloudpickle_version >= LooseVersion("0.5.4") and
cloudpickle_version <= LooseVersion("0.7.0"),
reason="Known issue in cloudpickle")
# https://github.com/cloudpipe/cloudpickle/pull/240
def test_interactively_defined_nested_functions(self):
# Check that it's possible to call nested interactively defined
# functions and furthermore that changing the code interactively
# is taken into account by the single worker process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the child process:
e.submit(id, 42).result()
# Test that it's possible to call interactively defined, nested
# functions:
def inner_func(x):
return -x
def outer_func(x):
return inner_func(x)
assert e.submit(outer_func, 1).result() == outer_func(1) == -1
# Test that changes to the definition of the inner function are
# taken into account in subsequent calls to the outer function.
def inner_func(x):
return x
assert e.submit(outer_func, 1).result() == outer_func(1) == 1
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_interactively_defined_recursive_functions(self):
# Check that it's possible to call a recursive function defined
# in a closure.
# Also check that calling several functions that stem from the same
# factory with different closure states results in the expected result:
# the function definitions should not collapse in the single worker
# process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the child process:
e.submit(id, 42).result()
def make_func(seed):
def func(x):
if x <= 0:
return seed
return func(x - 1) + 1
return func
func = make_func(0)
assert e.submit(func, 5).result() == func(5) == 5
func = make_func(1)
assert e.submit(func, 5).result() == func(5) == 6
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_compat_with_concurrent_futures_exception(self):
# It should be possible to use a loky process pool executor as a drop-in
# replacement for a ProcessPoolExecutor, including when catching
# exceptions:
concurrent = pytest.importorskip('concurrent')
from concurrent.futures.process import BrokenProcessPool as BPPExc
with pytest.raises(BPPExc):
get_reusable_executor(max_workers=2).submit(crash).result()
e = get_reusable_executor(max_workers=2)
f = e.submit(id, 42)
# Ensure that loky.Future are compatible with concurrent.futures
# (see #155)
assert isinstance(f, concurrent.futures.Future)
(done, running) = concurrent.futures.wait([f], timeout=15)
assert len(running) == 0
thread_configurations = [
('constant', 'clean_start'),
('constant', 'broken_start'),
('varying', 'clean_start'),
('varying', 'broken_start'),
]
@pytest.mark.parametrize("workers, executor_state", thread_configurations)
def test_reusable_executor_thread_safety(self, workers, executor_state):
if executor_state == 'clean_start':
# Create a new shared executor and ensure that its workers are
# ready:
get_reusable_executor(reuse=False).submit(id, 42).result()
else:
# Break the shared executor before launching the threads:
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGSEGV")):
executor = get_reusable_executor(reuse=False)
executor.submit(return_instance, CrashAtPickle).result()
def helper_func(output_collector, max_workers=2, n_outer_steps=5,
n_inner_steps=10):
with warnings.catch_warnings(): # ignore resize warnings
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=max_workers)
for i in range(n_outer_steps):
results = executor.map(
lambda x: x ** 2, range(n_inner_steps))
expected_result = [x ** 2 for x in range(n_inner_steps)]
assert list(results) == expected_result
output_collector.append('ok')
if workers == 'constant':
max_workers = [2] * 10
else:
max_workers = [(i % 4) + 1 for i in range(10)]
# Use the same executor with the same number of workers concurrently
# in different threads:
output_collector = []
threads = [threading.Thread(
target=helper_func, args=(output_collector, w),
name='test_thread_%02d_max_workers_%d' % (i, w))
for i, w in enumerate(max_workers)]
with warnings.catch_warnings(record=True):
for t in threads:
t.start()
for t in threads:
t.join()
assert output_collector == ['ok'] * len(threads)
def test_reusable_executor_reuse_true(self):
executor = get_reusable_executor(max_workers=3, timeout=42)
executor.submit(id, 42).result()
assert len(executor._processes) == 3
assert executor._timeout == 42
executor2 = get_reusable_executor(reuse=True)
executor2.submit(id, 42).result()
assert len(executor2._processes) == 3
assert executor2._timeout == 42
assert executor2 is executor
executor3 = get_reusable_executor()
executor3.submit(id, 42).result()
assert len(executor3._processes) == cpu_count()
assert executor3._timeout == 10
assert executor3 is not executor
executor4 = get_reusable_executor()
assert executor4 is executor3
class TestExecutorInitializer(ReusableExecutorMixin):
def _initializer(self, x):
loky._initialized_state = x
def _test_initializer(self, delay=0):
sleep(delay)
return getattr(loky, "_initialized_state", "uninitialized")
def test_reusable_initializer(self):
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=('done',))
assert executor.submit(self._test_initializer).result() == 'done'
# when the initializer changes, the executor is re-spawned
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=(42,))
assert executor.submit(self._test_initializer).result() == 42
# With reuse=True, the executor uses the same initializer
executor = get_reusable_executor(max_workers=4, reuse=True)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 42
# With reuse='auto', the initializer is not used anymore
executor = get_reusable_executor(max_workers=4)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 'uninitialized'
test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 3,027 | 14094
#!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import conf
from hummingbot.connector.exchange.wazirx.wazirx_api_order_book_data_source import WazirxAPIOrderBookDataSource
from hummingbot.connector.exchange.wazirx.wazirx_user_stream_tracker import WazirxUserStreamTracker
from hummingbot.connector.exchange.wazirx.wazirx_auth import WazirxAuth
import asyncio
from hummingbot.core.utils.async_utils import safe_ensure_future
import logging
import unittest
trading_pairs = ["BTC-INR", "ZRX-INR"]
class WazirxUserStreamTrackerUnitTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
api_key = conf.wazirx_api_key
secret_key = conf.wazirx_secret_key
cls.wazirx_auth = WazirxAuth(api_key, secret_key)
cls.wazirx_orderbook_data_source = WazirxAPIOrderBookDataSource(trading_pairs=trading_pairs)
cls.user_stream_tracker: WazirxUserStreamTracker = WazirxUserStreamTracker(cls.wazirx_auth, trading_pairs)
def run_async(self, task):
return self.ev_loop.run_until_complete(task)
async def _iter_user_event_queue(self):
while True:
try:
yield await self.user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
raise
async def _user_stream_event_listener(self):
""" Wait for 5 events to be seen """
count = 0
async for event_message in self._iter_user_event_queue():
logging.info(event_message)
if count > 5:
return
count += 1
def test_user_stream(self):
safe_ensure_future(self.user_stream_tracker.start())
# Wait for the stream to process some messages.
self.ev_loop.run_until_complete(self._user_stream_event_listener())
logging.info(self.user_stream_tracker.user_stream)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
src/python/setup.py | blaine141/NVISII | 149 | 14117
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from setuptools import setup, dist
import wheel
import os
# required to generate a platlib folder required by audittools
from setuptools.command.install import install
# for generating a wheel version from git tag
from setuptools_scm import get_version
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
if self.distribution.has_ext_modules():
self.install_lib = self.install_platlib
# force setuptools to recognize that this is
# actually a binary distribution
class BinaryDistribution(dist.Distribution):
def is_pure(self):
return False
def has_ext_modules(foo):
return True
# This gets the version from the most recent git tag, potentially concatenating
# a commit hash at the end.
current_version = get_version(
root = "..",
relative_to = __file__,
fallback_version='0.0.0-dev0'
)
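# Illustrative values only (the actual output depends on the git state and on
# setuptools_scm's version scheme): a clean checkout of a tag might yield
# something like "1.1.72", a commit after a tag something like
# "1.1.73.dev4+g1a2b3c4", and a checkout without git metadata falls back to
# the "0.0.0-dev0" value configured above.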
optix_version = os.environ.get("OPTIX_VERSION", None)
if optix_version:
current_version = current_version + "." + optix_version
print(current_version)
setup(
# This package is called nvisii
name='nvisii',
install_requires = ['numpy>=1.19.5'],
packages = ['nvisii', "nvisii.importers"], # include the package "nvisii"
# make sure the shared library is included
package_data = {'': ("*.dll", "*.pyd", "*.so")},
include_package_data=True,
description='',
# See class BinaryDistribution that was defined earlier
distclass=BinaryDistribution,
version = current_version,
author='<NAME>',
author_email='',
maintainer='',
maintainer_email='',
python_requires = ">=3.6",
cmdclass={'install': InstallPlatlib},
)
rojak-analyzer/generate_stopwords.py | pyk/rojak | 107 | 14145
# Run this script to create stopwords.py based on stopwords.txt
import json
def generate(input_txt, output_py):
# Read line by line
txt_file = open(input_txt)
words = set([])
for raw_line in txt_file:
line = raw_line.strip()
# Skip empty line
if len(line) < 1: continue
# Skip comments
if line[0] == '#': continue
# Collect the stopwords
words.add(line)
# Dump the array to a file
output = open(output_py, 'w')
output.write('# DO NOT EDIT THIS FILE!\n')
output.write('# Edit stopwords.txt, generate this file again via ')
output.write('generate_stopwords.py\n')
output.write('stopwords = set(%s)' % (json.dumps(sorted(words),
indent=4)))
output.close()
txt_file.close()
if __name__ == '__main__':
generate('stopwords.txt', 'stopwords.py')
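# Illustrative round trip (the file contents below are hypothetical examples).
# Given a stopwords.txt containing:
#
#     # example stopwords
#     dan
#     yang
#
# the generated stopwords.py would look like:
#
#     # DO NOT EDIT THIS FILE!
#     # Edit stopwords.txt, generate this file again via generate_stopwords.py
#     stopwords = set([
#         "dan",
#         "yang"
#     ])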
pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 128 | 14147
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse
from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService
from ..base.soap import ExchangeServiceSOAP
from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType
from ..compat import BASESTRING_TYPES
from . import soap_request
from lxml import etree
from copy import deepcopy
from datetime import date
import warnings
log = logging.getLogger("pyexchange")
class Exchange2010Service(ExchangeServiceSOAP):
def calendar(self, id="calendar"):
return Exchange2010CalendarService(service=self, calendar_id=id)
def mail(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def contacts(self):
raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)")
def folder(self):
return Exchange2010FolderService(service=self)
def _send_soap_request(self, body, headers=None, retries=2, timeout=30, encoding="utf-8"):
headers = {
"Accept": "text/xml",
"Content-type": "text/xml; charset=%s " % encoding
}
return super(Exchange2010Service, self)._send_soap_request(body, headers=headers, retries=retries, timeout=timeout, encoding=encoding)
def _check_for_errors(self, xml_tree):
super(Exchange2010Service, self)._check_for_errors(xml_tree)
self._check_for_exchange_fault(xml_tree)
def _check_for_exchange_fault(self, xml_tree):
# If the request succeeded, we should see a <m:ResponseCode>NoError</m:ResponseCode>
# somewhere in the response. If we don't (a) see the tag, or (b) it doesn't say "NoError",
# then flip out
response_codes = xml_tree.xpath(u'//m:ResponseCode', namespaces=soap_request.NAMESPACES)
if not response_codes:
raise FailedExchangeException(u"Exchange server did not return a status response", None)
# The full (massive) list of possible return responses is here.
# http://msdn.microsoft.com/en-us/library/aa580757(v=exchg.140).aspx
for code in response_codes:
if code.text == u"ErrorChangeKeyRequiredForWriteOperations":
# change key is missing or stale. we can fix that, so throw a special error
raise ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorItemNotFound":
# exchange_invite_key wasn't found on the server
raise ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorIrresolvableConflict":
# tried to update an item with an old change key
raise ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorInternalServerTransientError":
# temporary internal server error. throw a special error so we can retry
raise ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server" % code.text)
elif code.text == u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange":
# just means some or all of the requested instances are out of range
pass
elif code.text != u"NoError":
raise FailedExchangeException(u"Exchange Fault (%s) from Exchange server" % code.text)
class Exchange2010CalendarService(BaseExchangeCalendarService):
def event(self, id=None, **kwargs):
return Exchange2010CalendarEvent(service=self.service, id=id, **kwargs)
def get_event(self, id):
return Exchange2010CalendarEvent(service=self.service, id=id)
def new_event(self, **properties):
return Exchange2010CalendarEvent(service=self.service, calendar_id=self.calendar_id, **properties)
def list_events(self, start=None, end=None, details=False, delegate_for=None):
return Exchange2010CalendarEventList(service=self.service, calendar_id=self.calendar_id, start=start, end=end, details=details, delegate_for=delegate_for)
class Exchange2010CalendarEventList(object):
"""
Creates & Stores a list of Exchange2010CalendarEvent items in the "self.events" variable.
"""
def __init__(self, service=None, calendar_id=u'calendar', start=None, end=None, details=False, delegate_for=None):
self.service = service
self.count = 0
self.start = start
self.end = end
self.events = list()
self.event_ids = list()
self.details = details
self.delegate_for = delegate_for
# This request uses a Calendar-specific query between two dates.
body = soap_request.get_calendar_items(format=u'AllProperties', calendar_id=calendar_id, start=self.start, end=self.end, delegate_for=self.delegate_for)
response_xml = self.service.send(body)
self._parse_response_for_all_events(response_xml)
# Populate the event ID list, for convenience reasons.
for event in self.events:
self.event_ids.append(event._id)
# If we have requested all the details, basically repeat the previous 3 steps,
# but instead of start/stop, we have a list of ID fields.
if self.details:
log.debug(u'Received request for all details, retrieving now!')
self.load_all_details()
return
def _parse_response_for_all_events(self, response):
"""
This function will retrieve *most* of the event data, excluding Organizer & Attendee details
"""
items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if not items:
items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if items:
self.count = len(items)
log.debug(u'Found %s items' % self.count)
for item in items:
self._add_event(xml=soap_request.M.Items(deepcopy(item)))
else:
log.debug(u'No calendar items found with search parameters.')
return self
def _add_event(self, xml=None):
log.debug(u'Adding new event to all events list.')
event = Exchange2010CalendarEvent(service=self.service, xml=xml)
log.debug(u'Subject of new event is %s' % event.subject)
self.events.append(event)
return self
def load_all_details(self):
"""
This function will execute all the event lookups for known events.
This is intended for use when you want to have a completely populated event entry, including
Organizer & Attendee details.
"""
log.debug(u"Loading all details")
if self.count > 0:
# Now, empty out the events to prevent duplicates!
del(self.events[:])
# Send the SOAP request with the list of exchange ID values.
log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
response_xml = self.service.send(body)
# Re-parse the results for all the details!
self._parse_response_for_all_events(response_xml)
return self
class Exchange2010CalendarEvent(BaseExchangeCalendarEvent):
def _init_from_service(self, id):
log.debug(u'Creating new Exchange2010CalendarEvent object from ID')
body = soap_request.get_item(exchange_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_event(response_xml)
self._update_properties(properties)
self._id = id
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def _init_from_xml(self, xml=None):
log.debug(u'Creating new Exchange2010CalendarEvent object from XML')
properties = self._parse_response_for_get_event(xml)
self._update_properties(properties)
self._id, self._change_key = self._parse_id_and_change_key_from_response(xml)
log.debug(u'Created new event object with ID: %s' % self._id)
self._reset_dirty_attributes()
return self
def as_json(self):
raise NotImplementedError
def validate(self):
if self.recurrence is not None:
if not (isinstance(self.recurrence_end_date, date)):
raise ValueError('recurrence_end_date must be of type date')
elif (self.recurrence_end_date < self.start.date()):
raise ValueError('recurrence_end_date must be after start')
if self.recurrence == u'daily':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 999):
raise ValueError('recurrence_interval must be an int in the range from 1 to 999')
elif self.recurrence == u'weekly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
if self.recurrence_days is None:
raise ValueError('recurrence_days is required')
for day in self.recurrence_days.split(' '):
if day not in self.WEEKLY_DAYS:
raise ValueError('recurrence_days received unknown value: %s' % day)
elif self.recurrence == u'monthly':
if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99):
raise ValueError('recurrence_interval must be an int in the range from 1 to 99')
elif self.recurrence == u'yearly':
pass # everything is pulled from start
else:
raise ValueError('recurrence received unknown value: %s' % self.recurrence)
super(Exchange2010CalendarEvent, self).validate()
def create(self):
"""
Creates an event in Exchange. ::
event = service.calendar().new_event(
subject=u"80s Movie Night",
location = u"My house",
)
event.create()
Invitations to attendees are sent out immediately.
"""
self.validate()
body = soap_request.new_event(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def resend_invitations(self):
"""
Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite.
"""
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
"""
Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``.
"""
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self
def cancel(self):
"""
Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting.
"""
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Calendar ID to where you want to move the event to.
Moves an event to a different folder (calendar). ::
event = service.calendar().get_event(id='KEY HERE')
event.move_to(folder_id='NEW CALENDAR KEY HERE')
"""
if not folder_id:
raise TypeError(u"You can't move an event to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move an event that hasn't been created yet.")
self.refresh_change_key()
response_xml = self.service.send(soap_request.move_event(self, folder_id))
new_id, new_change_key = self._parse_id_and_change_key_from_response(response_xml)
if not new_id:
raise ValueError(u"MoveItem returned success but requested item not moved")
self._id = new_id
self._change_key = new_change_key
self.calendar_id = folder_id
return self
def get_master(self):
"""
get_master()
:raises InvalidEventType: When this method is called on an event that is not an Occurrence type.
This will return the master event to the occurrence.
**Examples**::
event = service.calendar().get_event(id='<event_id>')
print event.type # If it prints out 'Occurrence' then that means we could get the master.
master = event.get_master()
print master.type # Will print out 'RecurringMaster'.
"""
if self.type != 'Occurrence':
raise InvalidEventType("get_master method can only be called on a 'Occurrence' event type")
body = soap_request.get_master(exchange_id=self._id, format=u"AllProperties")
response_xml = self.service.send(body)
return Exchange2010CalendarEvent(service=self.service, xml=response_xml)
def get_occurrence(self, instance_index):
"""
get_occurrence(instance_index)
        :param iterable instance_index: This should be a tuple or list of integers which correspond to occurrences.
:raises TypeError: When instance_index is not an iterable of ints.
:raises InvalidEventType: When this method is called on an event that is not a RecurringMaster type.
This will return a list of occurrence events.
**Examples**::
master = service.calendar().get_event(id='<event_id>')
# The following will return the first 20 occurrences in the recurrence.
# If there are not 20 occurrences, it will only return what it finds.
occurrences = master.get_occurrence(range(1,21))
for occurrence in occurrences:
print occurrence.start
"""
if not all([isinstance(i, int) for i in instance_index]):
raise TypeError("instance_index must be an interable of type int")
if self.type != 'RecurringMaster':
raise InvalidEventType("get_occurrance method can only be called on a 'RecurringMaster' event type")
body = soap_request.get_occurrence(exchange_id=self._id, instance_index=instance_index, format=u"AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def conflicting_events(self):
"""
conflicting_events()
This will return a list of conflicting events.
**Example**::
event = service.calendar().get_event(id='<event_id>')
for conflict in event.conflicting_events():
print conflict.subject
"""
if not self.conflicting_event_ids:
return []
body = soap_request.get_item(exchange_id=self.conflicting_event_ids, format="AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events
def refresh_change_key(self):
body = soap_request.get_item(exchange_id=self._id, format=u"IdOnly")
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//m:Items/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_response_for_get_event(self, response):
result = self._parse_event_properties(response)
organizer_properties = self._parse_event_organizer(response)
if organizer_properties is not None:
if 'email' not in organizer_properties:
organizer_properties['email'] = None
result[u'organizer'] = ExchangeEventOrganizer(**organizer_properties)
attendee_properties = self._parse_event_attendees(response)
result[u'_attendees'] = self._build_resource_dictionary([ExchangeEventResponse(**attendee) for attendee in attendee_properties])
resource_properties = self._parse_event_resources(response)
result[u'_resources'] = self._build_resource_dictionary([ExchangeEventResponse(**resource) for resource in resource_properties])
result['_conflicting_event_ids'] = self._parse_event_conflicts(response)
return result
def _parse_event_properties(self, response):
property_map = {
u'subject': {
u'xpath': u'//m:Items/t:CalendarItem/t:Subject',
},
u'location':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Location',
},
u'availability':
{
u'xpath': u'//m:Items/t:CalendarItem/t:LegacyFreeBusyStatus',
},
u'start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Start',
u'cast': u'datetime',
},
u'end':
{
u'xpath': u'//m:Items/t:CalendarItem/t:End',
u'cast': u'datetime',
},
u'html_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="HTML"]',
},
u'text_body':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Body[@BodyType="Text"]',
},
u'_type':
{
u'xpath': u'//m:Items/t:CalendarItem/t:CalendarItemType',
},
u'reminder_minutes_before_start':
{
u'xpath': u'//m:Items/t:CalendarItem/t:ReminderMinutesBeforeStart',
u'cast': u'int',
},
u'is_all_day':
{
u'xpath': u'//m:Items/t:CalendarItem/t:IsAllDayEvent',
u'cast': u'bool',
},
u'recurrence_end_date':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:EndDateRecurrence/t:EndDate',
u'cast': u'date_only_naive',
},
u'recurrence_interval':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/*/t:Interval',
u'cast': u'int',
},
u'recurrence_days':
{
u'xpath': u'//m:Items/t:CalendarItem/t:Recurrence/t:WeeklyRecurrence/t:DaysOfWeek',
},
}
result = self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
try:
recurrence_node = response.xpath(u'//m:Items/t:CalendarItem/t:Recurrence', namespaces=soap_request.NAMESPACES)[0]
except IndexError:
recurrence_node = None
if recurrence_node is not None:
if recurrence_node.find('t:DailyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'daily'
elif recurrence_node.find('t:WeeklyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'weekly'
elif recurrence_node.find('t:AbsoluteMonthlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'monthly'
elif recurrence_node.find('t:AbsoluteYearlyRecurrence', namespaces=soap_request.NAMESPACES) is not None:
result['recurrence'] = 'yearly'
return result
def _parse_event_organizer(self, response):
organizer = response.xpath(u'//m:Items/t:CalendarItem/t:Organizer/t:Mailbox', namespaces=soap_request.NAMESPACES)
property_map = {
u'name':
{
u'xpath': u't:Name'
},
u'email':
{
u'xpath': u't:EmailAddress'
},
}
if organizer:
return self.service._xpath_to_dict(element=organizer[0], property_map=property_map, namespace_map=soap_request.NAMESPACES)
else:
return None
def _parse_event_resources(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
resources = response.xpath(u'//m:Items/t:CalendarItem/t:Resources/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in resources:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_attendees(self, response):
property_map = {
u'name':
{
u'xpath': u't:Mailbox/t:Name'
},
u'email':
{
u'xpath': u't:Mailbox/t:EmailAddress'
},
u'response':
{
u'xpath': u't:ResponseType'
},
u'last_response':
{
u'xpath': u't:LastResponseTime',
u'cast': u'datetime'
},
}
result = []
required_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:RequiredAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in required_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = True
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
optional_attendees = response.xpath(u'//m:Items/t:CalendarItem/t:OptionalAttendees/t:Attendee', namespaces=soap_request.NAMESPACES)
for attendee in optional_attendees:
attendee_properties = self.service._xpath_to_dict(element=attendee, property_map=property_map, namespace_map=soap_request.NAMESPACES)
attendee_properties[u'required'] = False
if u'last_response' not in attendee_properties:
attendee_properties[u'last_response'] = None
if u'email' in attendee_properties:
result.append(attendee_properties)
return result
def _parse_event_conflicts(self, response):
conflicting_ids = response.xpath(u'//m:Items/t:CalendarItem/t:ConflictingMeetings/t:CalendarItem/t:ItemId', namespaces=soap_request.NAMESPACES)
return [id_element.get(u"Id") for id_element in conflicting_ids]
class Exchange2010FolderService(BaseExchangeFolderService):
def folder(self, id=None, **kwargs):
return Exchange2010Folder(service=self.service, id=id, **kwargs)
def get_folder(self, id):
"""
:param str id: The Exchange ID of the folder to retrieve from the Exchange store.
Retrieves the folder specified by the id, from the Exchange store.
**Examples**::
folder = service.folder().get_folder(id)
"""
return Exchange2010Folder(service=self.service, id=id)
def new_folder(self, **properties):
"""
new_folder(display_name=display_name, folder_type=folder_type, parent_id=parent_id)
:param str display_name: The display name given to the new folder.
:param str folder_type: The type of folder to create. Possible values are 'Folder',
'CalendarFolder', 'ContactsFolder', 'SearchFolder', 'TasksFolder'.
:param str parent_id: The parent folder where the new folder will be created.
Creates a new folder with the given properties. Not saved until you call the create() method.
**Examples**::
folder = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
folder.create()
"""
return Exchange2010Folder(service=self.service, **properties)
def find_folder(self, parent_id):
"""
find_folder(parent_id)
:param str parent_id: The parent folder to list.
This method will return a list of sub-folders to a given parent folder.
**Examples**::
# Iterate through folders within the default 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
print(folder.display_name)
# Delete all folders within the 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
folder.delete()
"""
body = soap_request.find_folder(parent_id=parent_id, format=u'AllProperties')
response_xml = self.service.send(body)
return self._parse_response_for_find_folder(response_xml)
def _parse_response_for_find_folder(self, response):
result = []
folders = response.xpath(u'//t:Folders/t:*', namespaces=soap_request.NAMESPACES)
for folder in folders:
result.append(
Exchange2010Folder(
service=self.service,
xml=etree.fromstring(etree.tostring(folder)) # Might be a better way to do this
)
)
return result
class Exchange2010Folder(BaseExchangeFolder):
def _init_from_service(self, id):
body = soap_request.get_folder(folder_id=id, format=u'AllProperties')
response_xml = self.service.send(body)
properties = self._parse_response_for_get_folder(response_xml)
self._update_properties(properties)
return self
def _init_from_xml(self, xml):
properties = self._parse_response_for_get_folder(xml)
self._update_properties(properties)
return self
def create(self):
"""
Creates a folder in Exchange. ::
calendar = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
calendar.create()
"""
self.validate()
body = soap_request.new_folder(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self
def delete(self):
"""
Deletes a folder from the Exchange store. ::
folder = service.folder().get_folder(id)
print("Deleting folder: %s" % folder.display_name)
folder.delete()
"""
if not self.id:
raise TypeError(u"You can't delete a folder that hasn't been created yet.")
body = soap_request.delete_folder(self)
response_xml = self.service.send(body) # noqa
# TODO: verify deletion
self._id = None
self._change_key = None
return None
def move_to(self, folder_id):
"""
:param str folder_id: The Folder ID of what will be the new parent folder, of this folder.
Move folder to a different location, specified by folder_id::
folder = service.folder().get_folder(id)
folder.move_to(folder_id="ID of new location's folder")
"""
if not folder_id:
raise TypeError(u"You can't move to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move a folder that hasn't been created yet.")
response_xml = self.service.send(soap_request.move_folder(self, folder_id)) # noqa
result_id, result_key = self._parse_id_and_change_key_from_response(response_xml)
if self.id != result_id:
raise ValueError(u"MoveFolder returned success but requested folder not moved")
self.parent_id = folder_id
return self
def _parse_response_for_get_folder(self, response):
FOLDER_PATH = u'//t:Folder | //t:CalendarFolder | //t:ContactsFolder | //t:SearchFolder | //t:TasksFolder'
path = response.xpath(FOLDER_PATH, namespaces=soap_request.NAMESPACES)[0]
result = self._parse_folder_properties(path)
return result
def _parse_folder_properties(self, response):
property_map = {
u'display_name': {u'xpath': u't:DisplayName'},
}
self._id, self._change_key = self._parse_id_and_change_key_from_response(response)
self._parent_id = self._parse_parent_id_and_change_key_from_response(response)[0]
self.folder_type = etree.QName(response).localname
return self.service._xpath_to_dict(element=response, property_map=property_map, namespace_map=soap_request.NAMESPACES)
def _parse_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:FolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
def _parse_parent_id_and_change_key_from_response(self, response):
id_elements = response.xpath(u'//t:ParentFolderId', namespaces=soap_request.NAMESPACES)
if id_elements:
id_element = id_elements[0]
return id_element.get(u"Id", None), id_element.get(u"ChangeKey", None)
else:
return None, None
|
models/recall/word2vec/static_model.py | ziyoujiyi/PaddleRec | 2,739 | 14150 | <filename>models/recall/word2vec/static_model.py<gh_stars>1000+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
from net import Word2VecLayer, Word2VecInferLayer
class StaticModel(object):
def __init__(self, config):
self.cost = None
self.metrics = {}
self.config = config
self._init_hyper_parameters()
def _init_hyper_parameters(self):
self.sparse_feature_number = self.config.get(
"hyper_parameters.sparse_feature_number")
self.sparse_feature_dim = self.config.get(
"hyper_parameters.sparse_feature_dim")
self.neg_num = self.config.get("hyper_parameters.neg_num")
self.with_shuffle_batch = self.config.get(
"hyper_parameters.with_shuffle_batch")
self.learning_rate = self.config.get(
"hyper_parameters.optimizer.learning_rate")
self.decay_steps = self.config.get(
"hyper_parameters.optimizer.decay_steps")
self.decay_rate = self.config.get(
"hyper_parameters.optimizer.decay_rate")
def create_feeds(self, is_infer=False):
if is_infer:
analogy_a = paddle.static.data(
name="analogy_a", shape=[None, 1], dtype='int64')
analogy_b = paddle.static.data(
name="analogy_b", shape=[None, 1], dtype='int64')
analogy_c = paddle.static.data(
name="analogy_c", shape=[None, 1], dtype='int64')
#analogy_d = paddle.static.data(
# name="analogy_d", shape=[None], dtype='int64')
return [analogy_a, analogy_b, analogy_c]
input_word = paddle.static.data(
name="input_word", shape=[None, 1], dtype='int64')
true_word = paddle.static.data(
name='true_label', shape=[None, 1], dtype='int64')
if self.with_shuffle_batch:
return [input_word, true_word]
neg_word = paddle.static.data(
name="neg_label", shape=[None, self.neg_num], dtype='int64')
return [input_word, true_word, neg_word]
def net(self, inputs, is_infer=False):
word2vec_model = Word2VecLayer(
self.sparse_feature_number,
self.sparse_feature_dim,
self.neg_num,
emb_name="emb",
emb_w_name="emb_w",
emb_b_name="emb_b")
true_logits, neg_logits = word2vec_model.forward(inputs)
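        # Standard negative-sampling word2vec objective: the observed (input, true)
        # pair is pushed towards label 1 and each sampled negative word towards
        # label 0, so the loss below is the sum of the two binary cross-entropy terms.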
label_ones = paddle.full(
shape=[paddle.shape(true_logits)[0], 1], fill_value=1.0)
label_zeros = paddle.full(
shape=[paddle.shape(true_logits)[0], self.neg_num], fill_value=0.0)
true_logits = paddle.nn.functional.sigmoid(true_logits)
true_xent = paddle.nn.functional.binary_cross_entropy(true_logits,
label_ones)
neg_logits = paddle.nn.functional.sigmoid(neg_logits)
neg_xent = paddle.nn.functional.binary_cross_entropy(neg_logits,
label_zeros)
cost = paddle.add(true_xent, neg_xent)
avg_cost = paddle.mean(x=cost)
self._cost = avg_cost
fetch_dict = {'loss': avg_cost}
return fetch_dict
def create_optimizer(self, strategy=None):
optimizer = paddle.optimizer.SGD(learning_rate=self.learning_rate)
# learning_rate=paddle.fluid.layers.exponential_decay(
# learning_rate=self.learning_rate,
# decay_steps=self.decay_steps,
# decay_rate=self.decay_rate,
# staircase=True))
        if strategy is not None:
import paddle.distributed.fleet as fleet
optimizer = fleet.distributed_optimizer(optimizer, strategy)
return optimizer
def infer_net(self, input):
#[analogy_a, analogy_b, analogy_c] = inputs
all_label = paddle.static.data(
name="all_label",
shape=[self.sparse_feature_number],
dtype='int64')
word2vec = Word2VecInferLayer(self.sparse_feature_number,
self.sparse_feature_dim, "emb")
val, pred_idx = word2vec.forward(input[0], input[1], input[2],
all_label)
fetch_dict = {'pred_idx': pred_idx}
return fetch_dict
|
test/test_tools.py | cokelaer/sequana | 138 | 14153 | <gh_stars>100-1000
from sequana.tools import bam_to_mapped_unmapped_fastq, reverse_complement, StatsBAM2Mapped
from sequana import sequana_data
from sequana.tools import bam_get_paired_distance, GZLineCounter, PairedFastQ
from sequana.tools import genbank_features_parser
def test_StatsBAM2Mapped():
data = sequana_data("test.bam", "testing")
res = StatsBAM2Mapped(data)
res.to_html()
def test_bam2fastq():
data = sequana_data("test.bam", "testing")
res = bam_to_mapped_unmapped_fastq(data)
def test_reverse_complement():
assert reverse_complement("AACCGGTTA") == 'TAACCGGTT'
def test_reverse():
from sequana.tools import reverse
assert reverse("AACCGG") == 'GGCCAA'
def test_distance():
data = sequana_data("test.bam", "testing")
distances = bam_get_paired_distance(data)
def test_gc_content():
from sequana.tools import gc_content
data = sequana_data('measles.fa', "testing")
gc_content(data, 10)['chr1']
gc_content(data, 101, circular=True)['chr1']
def test_genbank_features_parser():
data = sequana_data("JB409847.gbk")
genbank_features_parser(data)
def test_gzlinecounter():
assert len(GZLineCounter(sequana_data("test.fastq.gz"))) == 1000
def test_paired_file():
f1 = sequana_data("test.fastq.gz")
f2 = sequana_data("test.fastq.gz")
assert PairedFastQ(f1,f2).is_synchronised()
|
configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py | grimoire/mmdeploy | 746 | 14156 | <filename>configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py
_base_ = ['./rotated-detection_static.py', '../_base_/backends/tensorrt.py']
onnx_config = dict(
output_names=['dets', 'labels'],
input_shape=None,
dynamic_axes={
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
},
)
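# The exported ONNX graph keeps batch, height and width of the input (and the
# number of detections in the outputs) dynamic; the TensorRT profile below then
# bounds the input between 1x3x320x320 and 1x3x1024x1024, optimizing for
# 1x3x1024x1024.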
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 320, 320],
opt_shape=[1, 3, 1024, 1024],
max_shape=[1, 3, 1024, 1024])))
])
|
datasets.py | Tracesource/DCEC | 154 | 14168 | import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
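# Illustrative usage (shapes follow from the code above; MNIST has 70000 images):
#   x, y = load_mnist()  # x: (70000, 28, 28, 1) floats in [0, 1], y: (70000,) digit labels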
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
|
tests/integration_tests/framework/flask_utils.py | ilan-WS/cloudify-manager | 124 | 14197 | <reponame>ilan-WS/cloudify-manager<gh_stars>100-1000
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from cloudify.utils import setup_logger
from integration_tests.framework.docker import (execute,
copy_file_to_manager)
from integration_tests.tests.constants import MANAGER_PYTHON
from integration_tests.tests.utils import get_resource
logger = setup_logger('Flask Utils', logging.INFO)
security_config = None
PREPARE_SCRIPT_PATH = '/tmp/prepare_reset_storage.py'
SCRIPT_PATH = '/tmp/reset_storage.py'
CONFIG_PATH = '/tmp/reset_storage_config.json'
def prepare_reset_storage_script(container_id):
reset_script = get_resource('scripts/reset_storage.py')
prepare = get_resource('scripts/prepare_reset_storage.py')
copy_file_to_manager(container_id, reset_script, SCRIPT_PATH)
copy_file_to_manager(container_id, prepare, PREPARE_SCRIPT_PATH)
execute(container_id,
[MANAGER_PYTHON, PREPARE_SCRIPT_PATH, '--config', CONFIG_PATH])
def reset_storage(container_id):
logger.info('Resetting PostgreSQL DB')
# reset the storage by calling a script on the manager, to access
# localhost-only APIs (rabbitmq management api)
execute(container_id,
[MANAGER_PYTHON, SCRIPT_PATH, '--config', CONFIG_PATH])
def set_ldap(container_id, config_data):
    logger.info('Setting LDAP configuration')
    _prepare_set_ldap_script(container_id)
    execute(container_id,
            [MANAGER_PYTHON, '/tmp/set_ldap.py',
             '--config', json.dumps(config_data)])
def _prepare_set_ldap_script(container_id):
    set_ldap_script = get_resource('scripts/set_ldap.py')
    copy_file_to_manager(container_id, set_ldap_script, '/tmp/set_ldap.py')
|
test/talker.py | cjds/rosgo | 148 | 14211 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
def talker():
pub = rospy.Publisher('chatter', String)
rospy.init_node('talker', anonymous=True)
while not rospy.is_shutdown():
str = "%s: hello world %s" % (rospy.get_name(), rospy.get_time())
rospy.loginfo(str)
pub.publish(String(str))
rospy.sleep(1.0)
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
tests/io/test_kepseismic.py | jorgemarpa/lightkurve | 235 | 14217 | import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
|
recipes/Python/223585_Stable_deep_sorting_dottedindexed_attributes/recipe-223585.py | tdiprima/code | 2,023 | 14219 | <gh_stars>1000+
def sortByAttrs(seq, attrs):
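    # Decorate-sort-undecorate: build (via exec) a list comprehension that wraps
    # each object in a tuple of (attr values..., original index, object), sort the
    # tuples, then strip the decoration.  Keeping the original index in the tuple
    # makes the sort stable and avoids ever comparing the objects themselves.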
listComp = ['seq[:] = [(']
for attr in attrs:
listComp.append('seq[i].%s, ' % attr)
listComp.append('i, seq[i]) for i in xrange(len(seq))]')
exec('%s' % ''.join(listComp))
seq.sort()
seq[:] = [obj[-1] for obj in seq]
return
#
# begin test code
#
from random import randint
class a:
def __init__(self):
self.x = (randint(1, 5), randint(1, 5))
class b:
def __init__(self):
self.x = randint(1, 5)
self.y = (a(), a())
class c:
def __init__(self, arg):
self.x = arg
self.y = b()
if __name__ == '__main__':
aList = [c(1), c(2), c(3), c(4), c(5), c(6)]
print '\n...to be sorted by obj.y.y[1].x[1]'
print ' then, as needed, by obj.y.x'
print ' then, as needed, by obj.x\n\n ',
for i in range(6):
print '(' + str(aList[i].y.y[1].x[1]) + ',',
print str(aList[i].y.x) + ',',
print str(aList[i].x) + ') ',
sortByAttrs(aList, ['y.y[1].x[1]', 'y.x', 'x'])
print '\n\n...now sorted by listed attributes.\n\n ',
for i in range(6):
print '(' + str(aList[i].y.y[1].x[1]) + ',',
print str(aList[i].y.x) + ',',
print str(aList[i].x) + ') ',
print
#
# end test code
#
|
spirit/topic/forms.py | ImaginaryLandscape/Spirit | 974 | 14220 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import smart_bytes
from django.utils import timezone
from ..core import utils
from ..core.utils.forms import NestedModelChoiceField
from ..category.models import Category
from .models import Topic
class TopicForm(forms.ModelForm):
topic_hash = forms.CharField(
max_length=32,
widget=forms.HiddenInput,
required=False)
class Meta:
model = Topic
fields = ('title', 'category')
def __init__(self, user, *args, **kwargs):
super(TopicForm, self).__init__(*args, **kwargs)
self.user = user
self.fields['category'] = NestedModelChoiceField(
queryset=Category.objects.visible().opened().ordered(),
related_name='category_set',
parent_field='parent_id',
label_field='title',
label=_("Category"),
empty_label=_("Choose a category"))
if self.instance.pk and not user.st.is_moderator:
del self.fields['category']
def get_category(self):
return self.cleaned_data['category']
def get_topic_hash(self):
topic_hash = self.cleaned_data.get('topic_hash', None)
if topic_hash:
return topic_hash
return utils.get_hash((
smart_bytes(self.cleaned_data['title']),
smart_bytes('category-{}'.format(self.cleaned_data['category'].pk))))
def save(self, commit=True):
if not self.instance.pk:
self.instance.user = self.user
self.instance.reindex_at = timezone.now()
return super(TopicForm, self).save(commit)
|
package_manager/util_test.py | shahriak/dotnet5 | 10,302 | 14242 | import unittest
import os
from six import StringIO
from package_manager import util
CHECKSUM_TXT = "1915adb697103d42655711e7b00a7dbe398a33d7719d6370c01001273010d069"
DEBIAN_JESSIE_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="8"
VERSION="Debian GNU/Linux 8 (jessie)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_STRETCH_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="9"
VERSION="Debian GNU/Linux 9 (stretch)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_BUSTER_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="10"
VERSION="Debian GNU/Linux 10 (buster)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
# VERSION and VERSION_ID aren't set on unknown distros
DEBIAN_UNKNOWN_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
osReleaseForDistro = {
"jessie": DEBIAN_JESSIE_OS_RELEASE,
"stretch": DEBIAN_STRETCH_OS_RELEASE,
"buster": DEBIAN_BUSTER_OS_RELEASE,
"???": DEBIAN_UNKNOWN_OS_RELEASE,
}
class TestUtil(unittest.TestCase):
def test_sha256(self):
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'testdata', 'checksum.txt')
actual = util.sha256_checksum(filename)
self.assertEqual(CHECKSUM_TXT, actual)
def test_generate_debian_os_release(self):
for distro, expected_output in osReleaseForDistro.items():
output_file = StringIO()
util.generate_os_release(distro, output_file)
self.assertEqual(expected_output, output_file.getvalue())
if __name__ == '__main__':
unittest.main()
|
homeassistant/components/smarthab/light.py | tbarbette/core | 22,481 | 14267 | <gh_stars>1000+
"""Support for SmartHab device integration."""
from datetime import timedelta
import logging
import pysmarthab
from requests.exceptions import Timeout
from homeassistant.components.light import LightEntity
from . import DATA_HUB, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up SmartHab lights from a config entry."""
hub = hass.data[DOMAIN][config_entry.entry_id][DATA_HUB]
entities = (
SmartHabLight(light)
for light in await hub.async_get_device_list()
if isinstance(light, pysmarthab.Light)
)
async_add_entities(entities, True)
class SmartHabLight(LightEntity):
"""Representation of a SmartHab Light."""
def __init__(self, light):
"""Initialize a SmartHabLight."""
self._light = light
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._light.device_id
@property
def name(self) -> str:
"""Return the display name of this light."""
return self._light.label
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._light.state
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
await self._light.async_turn_on()
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self._light.async_turn_off()
async def async_update(self):
"""Fetch new state data for this light."""
try:
await self._light.async_update()
except Timeout:
_LOGGER.error(
"Reached timeout while updating light %s from API", self.entity_id
)
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py | pantsbuild/pants | 1,806 | 14300 | <gh_stars>1000+
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Any, ContextManager
import pytest
from pants.backend.docker.goals import package_image
from pants.backend.docker.subsystems import dockerfile_parser
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules import (
dependencies,
docker_binary,
docker_build_args,
docker_build_context,
docker_build_env,
dockerfile,
)
from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
)
from pants.backend.docker.util_rules.docker_build_env import DockerBuildEnvironment
from pants.backend.docker.value_interpolation import (
DockerBuildArgsInterpolationValue,
DockerInterpolationContext,
DockerInterpolationValue,
)
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget, ShellSourceTarget
from pants.backend.shell.target_types import rules as shell_target_types_rules
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST, EMPTY_SNAPSHOT, Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pytest_util import no_exception
from pants.testutil.rule_runner import QueryRule, RuleRunner
def create_rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*core_target_types_rules(),
*dependencies.rules(),
*docker_binary.rules(),
*docker_build_args.rules(),
*docker_build_context.rules(),
*docker_build_env.rules(),
*dockerfile.rules(),
*dockerfile_parser.rules(),
*package_image.rules(),
*package_pex_binary.rules(),
*pex_from_targets.rules(),
*shell_target_types_rules(),
*target_types_rules.rules(),
QueryRule(BuiltPackage, [PexBinaryFieldSet]),
QueryRule(DockerBuildContext, (DockerBuildContextRequest,)),
],
target_types=[
DockerImageTarget,
FilesGeneratorTarget,
PexBinary,
ShellSourcesGeneratorTarget,
ShellSourceTarget,
],
)
return rule_runner
@pytest.fixture
def rule_runner() -> RuleRunner:
return create_rule_runner()
def assert_build_context(
rule_runner: RuleRunner,
address: Address,
*,
build_upstream_images: bool = False,
expected_files: list[str],
expected_interpolation_context: dict[str, dict[str, str] | DockerInterpolationValue]
| None = None,
pants_args: list[str] | None = None,
runner_options: dict[str, Any] | None = None,
) -> DockerBuildContext:
if runner_options is None:
runner_options = {}
runner_options.setdefault("env_inherit", set()).update({"PATH", "PYENV_ROOT", "HOME"})
rule_runner.set_options(pants_args or [], **runner_options)
context = rule_runner.request(
DockerBuildContext,
[
DockerBuildContextRequest(
address=address,
build_upstream_images=build_upstream_images,
)
],
)
snapshot = rule_runner.request(Snapshot, [context.digest])
assert sorted(expected_files) == sorted(snapshot.files)
if expected_interpolation_context is not None:
if "build_args" in expected_interpolation_context:
expected_interpolation_context["build_args"] = DockerBuildArgsInterpolationValue(
expected_interpolation_context["build_args"]
)
assert context.interpolation_context == DockerInterpolationContext.from_dict(
expected_interpolation_context
)
return context
def test_file_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
# img_A -> files_A
# img_A -> img_B
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"])
files(name="files_A", sources=["files/**"])
"""
),
"src/a/Dockerfile": "FROM base",
"src/a/files/a01": "",
"src/a/files/a02": "",
# img_B -> files_B
"src/b/BUILD": dedent(
"""\
docker_image(name="img_B", dependencies=[":files_B"])
files(name="files_B", sources=["files/**"])
"""
),
"src/b/Dockerfile": "FROM base",
"src/b/files/b01": "",
"src/b/files/b02": "",
# Mixed
"src/c/BUILD": dedent(
"""\
docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"])
"""
),
"src/c/Dockerfile": "FROM base",
}
)
# We want files_B in build context for img_B
assert_build_context(
rule_runner,
Address("src/b", target_name="img_B"),
expected_files=["src/b/Dockerfile", "src/b/files/b01", "src/b/files/b02"],
)
# We want files_A in build context for img_A, but not files_B
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=["src/a/Dockerfile", "src/a/files/a01", "src/a/files/a02"],
)
# Mixed.
assert_build_context(
rule_runner,
Address("src/c", target_name="img_C"),
expected_files=[
"src/c/Dockerfile",
"src/a/files/a01",
"src/a/files/a02",
"src/b/files/b01",
"src/b/files/b02",
],
)
def test_from_image_build_arg_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/upstream/BUILD": dedent(
"""\
docker_image(
name="image",
repository="upstream/{name}",
instructions=["FROM alpine"],
)
"""
),
"src/downstream/BUILD": "docker_image(name='image')",
"src/downstream/Dockerfile": dedent(
"""\
ARG BASE_IMAGE=src/upstream:image
FROM $BASE_IMAGE
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/downstream", target_name="image"),
expected_files=["src/downstream/Dockerfile"],
build_upstream_images=True,
expected_interpolation_context={
"baseimage": {"tag": "latest"},
"stage0": {"tag": "latest"},
"build_args": {
"BASE_IMAGE": "upstream/image:latest",
},
},
)
def test_files_out_of_tree(rule_runner: RuleRunner) -> None:
# src/a:img_A -> res/static:files
rule_runner.write_files(
{
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=["res/static:files"])
"""
),
"res/static/BUILD": dedent(
"""\
files(name="files", sources=["!BUILD", "**/*"])
"""
),
"src/a/Dockerfile": "FROM base",
"res/static/s01": "",
"res/static/s02": "",
"res/static/sub/s03": "",
}
)
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=[
"src/a/Dockerfile",
"res/static/s01",
"res/static/s02",
"res/static/sub/s03",
],
)
def test_packaged_pex_path(rule_runner: RuleRunner) -> None:
# This test is here to ensure that we catch if there is any change in the generated path where
# built pex binaries go, as we rely on that for dependency inference in the Dockerfile.
rule_runner.write_files(
{
"src/docker/BUILD": """docker_image(dependencies=["src/python/proj/cli:bin"])""",
"src/docker/Dockerfile": """FROM python""",
"src/python/proj/cli/BUILD": """pex_binary(name="bin", entry_point="main.py")""",
"src/python/proj/cli/main.py": """print("cli main")""",
}
)
assert_build_context(
rule_runner,
Address("src/docker", target_name="docker"),
expected_files=["src/docker/Dockerfile", "src.python.proj.cli/bin.pex"],
)
def test_interpolation_context_from_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": "docker_image()",
"src/docker/Dockerfile": dedent(
"""\
FROM python:3.8
FROM alpine as interim
FROM interim
FROM scratch:1-1 as output
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
expected_interpolation_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
"build_args": {},
},
)
def test_synthetic_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
instructions=[
"FROM python:3.8",
"FROM alpine as interim",
"FROM interim",
"FROM scratch:1-1 as output",
]
)
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile.docker"],
expected_interpolation_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
"build_args": {},
},
)
def test_shell_source_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(dependencies=[":entrypoint", ":shell"])
shell_source(name="entrypoint", source="entrypoint.sh")
shell_sources(name="shell", sources=["scripts/**/*.sh"])
"""
),
"src/docker/Dockerfile": "FROM base",
"src/docker/entrypoint.sh": "",
"src/docker/scripts/s01.sh": "",
"src/docker/scripts/s02.sh": "",
"src/docker/scripts/random.file": "",
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=[
"src/docker/Dockerfile",
"src/docker/entrypoint.sh",
"src/docker/scripts/s01.sh",
"src/docker/scripts/s02.sh",
],
)
def test_build_arg_defaults_from_dockerfile(rule_runner: RuleRunner) -> None:
    # Test that only explicitly defined build args in the BUILD file or pants configuration use the
    # environment for their values.
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
extra_build_args=[
"base_version",
]
)
"""
),
"src/docker/Dockerfile": dedent(
"""\
ARG base_name=python
ARG base_version=3.8
FROM ${base_name}:${base_version}
ARG NO_DEF
ENV opt=${NO_DEF}
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
runner_options={
"env": {
"base_name": "no-effect",
"base_version": "3.9",
},
},
expected_files=["src/docker/Dockerfile"],
expected_interpolation_context={
"baseimage": {"tag": "${base_version}"},
"stage0": {"tag": "${base_version}"},
"build_args": {
# `base_name` is not listed here, as it was not an explicitly defined build arg.
"base_version": "3.9",
},
},
)
@pytest.mark.parametrize(
"dockerfile_arg_value, extra_build_arg_value, expect",
[
pytest.param(None, None, no_exception(), id="No args defined"),
pytest.param(
None,
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value for build arg",
),
pytest.param(None, "some default value", no_exception(), id="Default value for build arg"),
pytest.param("", None, no_exception(), id="No build arg defined, and ARG without default"),
pytest.param(
"",
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value from ARG",
),
pytest.param(
"", "some default value", no_exception(), id="Default value for build arg, ARG present"
),
pytest.param(
"some default value", None, no_exception(), id="No build arg defined, only ARG"
),
pytest.param("some default value", "", no_exception(), id="Default value from ARG"),
pytest.param(
"some default value",
"some other default",
no_exception(),
id="Default value for build arg, ARG default",
),
],
)
def test_undefined_env_var_behavior(
rule_runner: RuleRunner,
dockerfile_arg_value: str | None,
extra_build_arg_value: str | None,
expect: ContextManager,
) -> None:
dockerfile_arg = ""
if dockerfile_arg_value is not None:
dockerfile_arg = "ARG MY_ARG"
if dockerfile_arg_value:
dockerfile_arg += f"={dockerfile_arg_value}"
extra_build_args = ""
if extra_build_arg_value is not None:
extra_build_args = 'extra_build_args=["MY_ARG'
if extra_build_arg_value:
extra_build_args += f"={extra_build_arg_value}"
extra_build_args += '"],'
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
f"""\
docker_image(
{extra_build_args}
)
"""
),
"src/docker/Dockerfile": dedent(
f"""\
FROM python:3.8
{dockerfile_arg}
"""
),
}
)
with expect:
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
)
@pytest.fixture(scope="session")
def build_context() -> DockerBuildContext:
rule_runner = create_rule_runner()
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
extra_build_args=["DEF_ARG"],
instructions=[
"FROM python:3.8",
"ARG MY_ARG",
"ARG DEF_ARG=some-value",
],
)
"""
),
}
)
return assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile.docker"],
)
@pytest.mark.parametrize(
"fmt_string, result, expectation",
[
pytest.param(
"{build_args.MY_ARG}",
None,
pytest.raises(
ValueError,
match=(r"The build arg 'MY_ARG' is undefined\. Defined build args are: DEF_ARG\."),
),
id="ARG_NAME",
),
pytest.param(
"{build_args.DEF_ARG}",
"some-value",
no_exception(),
id="DEF_ARG",
),
],
)
def test_build_arg_behavior(
build_context: DockerBuildContext,
fmt_string: str,
result: str | None,
expectation: ContextManager,
) -> None:
with expectation:
assert fmt_string.format(**build_context.interpolation_context) == result
def test_create_docker_build_context() -> None:
context = DockerBuildContext.create(
build_args=DockerBuildArgs.from_strings("ARGNAME=value1"),
snapshot=EMPTY_SNAPSHOT,
build_env=DockerBuildEnvironment.create({"ENVNAME": "value2"}),
dockerfile_info=DockerfileInfo(
address=Address("test"),
digest=EMPTY_DIGEST,
source="test/Dockerfile",
putative_target_addresses=(),
version_tags=("base latest", "stage1 1.2", "dev 2.0", "prod 2.0"),
build_args=DockerBuildArgs.from_strings(),
from_image_build_arg_names=(),
copy_sources=(),
),
)
assert list(context.build_args) == ["ARGNAME=value1"]
assert dict(context.build_env.environment) == {"ENVNAME": "value2"}
assert context.dockerfile == "test/Dockerfile"
assert context.stages == ("base", "dev", "prod")
|
deeplab_resnet/__init__.py | tramper2/SIGGRAPH18SSS | 390 | 14302 | from .model import DeepLabResNetModel
from .hc_deeplab import HyperColumn_Deeplabv2
from .image_reader import ImageReader, read_data_list, get_indicator_mat, get_batch_1chunk, read_an_image_from_disk, tf_wrap_get_patch, get_batch
from .utils import decode_labels, inv_preprocess, prepare_label
|
joplin_web/api.py | foxmask/joplin-web | 382 | 14389 | # coding: utf-8
"""
joplin-web
"""
from django.conf import settings
from django.http.response import JsonResponse
from django.urls import reverse
from joplin_api import JoplinApiSync
from joplin_web.utils import nb_notes_by_tag, nb_notes_by_folder
import logging
from rich import console
console = console.Console()
logger = logging.getLogger("joplin_web.app")
joplin = JoplinApiSync(token=settings.JOPLIN_WEBCLIPPER_TOKEN)
def get_folders(request):
"""
all the folders
:param request
:return: json
"""
res = joplin.get_folders()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_folder(json_data)
logger.debug(data)
return JsonResponse(data, safe=False)
def get_tags(request):
res = joplin.get_tags()
json_data = sorted(res.json(), key=lambda k: k['title'])
data = nb_notes_by_tag(json_data)
return JsonResponse(data, safe=False)
|
xs/layers/ops.py | eLeVeNnN/xshinnosuke | 290 | 14422 | from .base import *
class Input(Layer):
def __init__(self, input_shape: Union[List, Tuple], **kwargs):
super(Input, self).__init__(input_shape=input_shape, **kwargs)
self._shape = input_shape
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = x
return self._data
class Reshape(Layer):
def __init__(self, shape: Tuple, **kwargs):
super().__init__(shape=shape, **kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.view(x, (-1, ) + self._shape, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
class ZeroPadding2D(Layer):
def __init__(self, padding, **kwargs):
self.padding = padding
super(ZeroPadding2D, self).__init__(**kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.pad2d(x, self.padding, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
self._shape = (input_shape[0], input_shape[1] + 2 * self.padding[0], input_shape[2] + 2 * self.padding[1])
return self._shape
class Add(Layer):
def __call__(self, inbounds: List[Layer], *args, **kwargs):
for inbound in inbounds:
self._in_bounds.append(inbound)
inbound.add_out_bounds(self)
self._shape = inbound.shape
return self
def init_layer_out_tensor(self, x : F.Tensor = None):
x = self._in_bounds[0].data if x is None else x
if self._data is None or x.shape[0] > self._data.shape_capacity[0]:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
elif x.shape[0] < self._data.shape_capacity[0]:
if GLOBAL.TRAINING:
self._data.slices(slice(None, x.shape[0], None))
else:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
else:
self._data.slices(slice(None, None, None))
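    # Note on the static buffer above: the layer's output tensor is reused across
    # steps -- it is reallocated only when the incoming batch exceeds the current
    # capacity and merely sliced down when the batch is smaller.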
def forward(self, x: F.Tensor = None, *args, **kwargs) -> F.Tensor:
self._data.zero_()
for in_bound in self._in_bounds:
GLOBAL.np.add(self._data.eval, in_bound.data.eval, out=self._data.eval)
if GLOBAL.TRAINING and in_bound.data.requires_grad:
initialize_ops_grad(in_bound.data)
self._data.requires_grad = self._data.requires_grad or in_bound.data.requires_grad
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
def backward(self, gradients: F.Tensor = None):
for in_bound in self._in_bounds:
if in_bound.data.requires_grad:
GLOBAL.np.add(in_bound.data.grad.eval, self._data.grad.eval, out=in_bound.data.grad.eval)
self._data.zero_grad()
|
torchx/examples/apps/lightning_classy_vision/test/component_test.py | LaudateCorpus1/torchx | 101 | 14438 | <reponame>LaudateCorpus1/torchx<gh_stars>100-1000
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torchx.examples.apps.lightning_classy_vision.component as lightning_classy_vision
from torchx.components.component_test_base import ComponentTestCase
class DistributedComponentTest(ComponentTestCase):
def test_trainer(self) -> None:
self.validate(lightning_classy_vision, "trainer")
def test_interpret(self) -> None:
self.validate(lightning_classy_vision, "interpret")
|
third_party/xiuminglib/xiuminglib/vis/video.py | leehsiu/nerfactor | 183 | 14440 | <filename>third_party/xiuminglib/xiuminglib/vis/video.py
from os.path import join, dirname
import numpy as np
from .text import put_text
from .. import const
from ..os import makedirs
from ..imprt import preset_import
from ..log import get_logger
logger = get_logger()
def make_video(
imgs, fps=24, outpath=None, method='matplotlib', dpi=96, bitrate=-1):
"""Writes a list of images into a grayscale or color video.
Args:
imgs (list(numpy.ndarray)): Each image should be of type ``uint8`` or
``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
fps (int, optional): Frame rate.
outpath (str, optional): Where to write the video to (a .mp4 file).
``None`` means
``os.path.join(const.Dir.tmp, 'make_video.mp4')``.
method (str, optional): Method to use: ``'matplotlib'``, ``'opencv'``,
``'video_api'``.
dpi (int, optional): Dots per inch when using ``matplotlib``.
bitrate (int, optional): Bit rate in kilobits per second when using
``matplotlib``; reasonable values include 7200.
Writes
- A video of the images.
"""
if outpath is None:
outpath = join(const.Dir.tmp, 'make_video.mp4')
makedirs(dirname(outpath))
assert imgs, "Frame list is empty"
for frame in imgs:
assert np.issubdtype(frame.dtype, np.unsignedinteger), \
"Image type must be unsigned integer"
h, w = imgs[0].shape[:2]
for frame in imgs[1:]:
assert frame.shape[:2] == (h, w), \
"All frames must have the same shape"
if method == 'matplotlib':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import animation
w_in, h_in = w / dpi, h / dpi
fig = plt.figure(figsize=(w_in, h_in))
Writer = animation.writers['ffmpeg'] # may require you to specify path
writer = Writer(fps=fps, bitrate=bitrate)
def img_plt(arr):
img_plt_ = plt.imshow(arr)
ax = plt.gca()
ax.set_position([0, 0, 1, 1])
ax.set_axis_off()
return img_plt_
anim = animation.ArtistAnimation(fig, [(img_plt(x),) for x in imgs])
anim.save(outpath, writer=writer)
# If obscure error like "ValueError: Invalid file object: <_io.Buff..."
# occurs, consider upgrading matplotlib so that it prints out the real,
# underlying ffmpeg error
plt.close('all')
elif method == 'opencv':
cv2 = preset_import('cv2', assert_success=True)
# TODO: debug codecs (see http://www.fourcc.org/codecs.php)
if outpath.endswith('.mp4'):
# fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# fourcc = cv2.VideoWriter_fourcc(*'X264')
fourcc = cv2.VideoWriter_fourcc(*'H264')
# fourcc = 0x00000021
elif outpath.endswith('.avi'):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
else:
raise NotImplementedError("Video type of\n\t%s" % outpath)
vw = cv2.VideoWriter(outpath, fourcc, fps, (w, h))
for frame in imgs:
if frame.ndim == 3:
frame = frame[:, :, ::-1] # cv2 uses BGR
vw.write(frame)
vw.release()
elif method == 'video_api':
video_api = preset_import('video_api', assert_success=True)
assert outpath.endswith('.webm'), "`video_api` requires .webm"
with video_api.write(outpath, fps=fps) as h:
for frame in imgs:
if frame.ndim == 3 and frame.shape[2] == 4:
frame = frame[:, :, :3]
#frame = frame.astype(np.ubyte)
h.add_frame(frame)
else:
raise ValueError(method)
logger.debug("Images written as a video to:\n%s", outpath)
def make_comparison_video(
imgs1, imgs2, bar_width=4, bar_color=(1, 0, 0), sweep_vertically=False,
sweeps=1, label1='', label2='', font_size=None, font_ttf=None,
label1_top_left_xy=None, label2_top_left_xy=None, **make_video_kwargs):
"""Writes two lists of images into a comparison video that toggles between
two videos with a sweeping bar.
Args:
imgs? (list(numpy.ndarray)): Each image should be of type ``uint8`` or
``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
bar_width (int, optional): Width of the sweeping bar.
bar_color (tuple(float), optional): Bar and label RGB, normalized to
:math:`[0,1]`. Defaults to red.
sweep_vertically (bool, optional): Whether to sweep vertically or
horizontally.
sweeps (int, optional): Number of sweeps.
label? (str, optional): Label for each video.
font_size (int, optional): Font size.
font_ttf (str, optional): Path to the .ttf font file. Defaults to Arial.
label?_top_left_xy (tuple(int), optional): The XY coordinate of the
label's top left corner.
make_video_kwargs (dict, optional): Keyword arguments for
:func:`make_video`.
Writes
- A comparison video.
"""
# Bar is perpendicular to sweep-along
sweep_along = 0 if sweep_vertically else 1
bar_along = 1 if sweep_vertically else 0
# Number of frames
n_frames = len(imgs1)
assert n_frames == len(imgs2), \
"Videos to be compared have different numbers of frames"
img_shape = imgs1[0].shape
# Bar color according to image dtype
img_dtype = imgs1[0].dtype
bar_color = np.array(bar_color, dtype=img_dtype)
if np.issubdtype(img_dtype, np.integer):
bar_color *= np.iinfo(img_dtype).max
# Map from frame index to bar location, considering possibly multiple trips
bar_locs = []
for i in range(sweeps):
ind = np.arange(0, img_shape[sweep_along])
if i % 2 == 1: # reverse every other trip
ind = ind[::-1]
bar_locs.append(ind)
bar_locs = np.hstack(bar_locs) # all possible locations
ind = np.linspace(0, len(bar_locs) - 1, num=n_frames, endpoint=True)
bar_locs = [bar_locs[int(x)] for x in ind] # uniformly sampled
# Label locations
if label1_top_left_xy is None:
# Label 1 at top left corner
label1_top_left_xy = (int(0.1 * img_shape[1]), int(0.05 * img_shape[0]))
if label2_top_left_xy is None:
if sweep_vertically:
# Label 2 at bottom left corner
label2_top_left_xy = (
int(0.1 * img_shape[1]), int(0.75 * img_shape[0]))
else:
# Label 2 at top right corner
label2_top_left_xy = (
int(0.7 * img_shape[1]), int(0.05 * img_shape[0]))
frames = []
for i, (img1, img2) in enumerate(zip(imgs1, imgs2)):
        assert img1.shape == img_shape, f"`imgs1[{i}]` has a different shape"
        assert img2.shape == img_shape, f"`imgs2[{i}]` has a different shape"
        assert img1.dtype == img_dtype, f"`imgs1[{i}]` has a different dtype"
        assert img2.dtype == img_dtype, f"`imgs2[{i}]` has a different dtype"
# Label the two images
img1 = put_text(
img1, label1, label_top_left_xy=label1_top_left_xy,
font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
img2 = put_text(
img2, label2, label_top_left_xy=label2_top_left_xy,
font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
# Bar start and end
bar_loc = bar_locs[i]
bar_width_half = bar_width // 2
bar_start = max(0, bar_loc - bar_width_half)
bar_end = min(bar_loc + bar_width_half, img_shape[sweep_along])
# Up to bar start, we show Image 1; bar end onwards, Image 2
img1 = np.take(img1, range(bar_start), axis=sweep_along)
img2 = np.take(
img2, range(bar_end, img_shape[sweep_along]), axis=sweep_along)
# Between the two images, we show the bar
actual_bar_width = img_shape[
sweep_along] - img1.shape[sweep_along] - img2.shape[sweep_along]
reps = [1, 1, 1]
reps[sweep_along] = actual_bar_width
reps[bar_along] = img_shape[bar_along]
bar_img = np.tile(bar_color, reps)
frame = np.concatenate((img1, bar_img, img2), axis=sweep_along)
frames.append(frame)
make_video(frames, **make_video_kwargs)
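# Usage sketch (illustrative only; the keyword names forwarded to make_video(),
# such as `outpath` and `fps`, are inferred from the make_video() body above
# and may need adjusting to its actual signature and configured backend):
#
#     imgs1 = [np.zeros((64, 96, 3), np.uint8) for _ in range(24)]
#     imgs2 = [np.full((64, 96, 3), 255, np.uint8) for _ in range(24)]
#     make_comparison_video(
#         imgs1, imgs2, sweeps=2, label1='black', label2='white',
#         outpath='/tmp/compare.webm', fps=12)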
|
cumulusci/core/config/BaseConfig.py | leboff/CumulusCI | 163 | 14443 | <reponame>leboff/CumulusCI<filename>cumulusci/core/config/BaseConfig.py<gh_stars>100-1000
import logging
class BaseConfig(object):
"""BaseConfig provides a common interface for nested access for all Config objects in CCI."""
defaults = {}
def __init__(self, config=None, keychain=None):
if config is None:
self.config = {}
else:
self.config = config
self._init_logger()
self._load_config()
def _init_logger(self):
"""Initializes self.logger"""
self.logger = logging.getLogger(__name__)
def _load_config(self):
"""Subclasses may override this method to initialize :py:attr:`~config`"""
pass
def __getattr__(self, name):
tree = name.split("__")
if name.startswith("_"):
raise AttributeError(f"Attribute {name} not found")
value = None
value_found = False
config = self.config
if len(tree) > 1:
# Walk through the config dictionary using __ as a delimiter
for key in tree[:-1]:
config = config.get(key)
if config is None:
break
if config and tree[-1] in config:
value = config[tree[-1]]
value_found = True
if value_found:
return value
else:
return self.defaults.get(name)
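# Minimal illustration (not part of CumulusCI itself): "__" in an attribute
# name walks nested dictionaries, falling back to the class-level `defaults`
# mapping when the path is missing. The config values below are made up.
if __name__ == "__main__":
    cfg = BaseConfig({"services": {"github": {"username": "octocat"}}})
    assert cfg.services__github__username == "octocat"
    assert cfg.services__missing is None  # absent key with no default -> None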
|
Network/class_func.py | Mobad225/S-DCNet | 153 | 14446 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Func1: change density map into count map
# density map: batch size * 1 * w * h
def get_local_count(density_map,psize,pstride):
IF_gpu = torch.cuda.is_available() # if gpu, return gpu
IF_ret_gpu = (density_map.device.type == 'cuda')
psize,pstride = int(psize),int(pstride)
density_map = density_map.cpu().type(torch.float32)
conv_kernel = torch.ones(1,1,psize,psize,dtype = torch.float32)
if IF_gpu:
density_map,conv_kernel = density_map.cuda(),conv_kernel.cuda()
count_map = F.conv2d(density_map,conv_kernel,stride=pstride)
if not IF_ret_gpu:
count_map = count_map.cpu()
return count_map
# Func2: convert count to class (0->c-1)
def Count2Class(count_map,label_indice):
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (count_map.device.type == 'cuda')
label_indice = label_indice.cpu().type(torch.float32)
cls_num = len(label_indice)+1
cls_map = torch.zeros(count_map.size()).type(torch.LongTensor)
if IF_gpu:
count_map,label_indice,cls_map = count_map.cuda(),label_indice.cuda(),cls_map.cuda()
for i in range(cls_num-1):
if IF_gpu:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor).cuda()
else:
cls_map = cls_map + (count_map >= label_indice[i]).cpu().type(torch.LongTensor)
if not IF_ret_gpu:
cls_map = cls_map.cpu()
return cls_map
# Func3: convert class (0->c-1) to count number
def Class2Count(pre_cls,label_indice):
'''
# --Input:
    # 1. pre_cls is a class label map with values in [0, 1, 2, ..., C-1]
    # 2. label_indice does not include 0, only the other interval boundaries
    # --Output:
    # 1. count value map, the same size as pre_cls
'''
if isinstance(label_indice,np.ndarray):
label_indice = torch.from_numpy(label_indice)
label_indice = label_indice.squeeze()
IF_gpu = torch.cuda.is_available()
IF_ret_gpu = (pre_cls.device.type == 'cuda')
    # transform interval to count value map
label2count = [0.0]
for (i,item) in enumerate(label_indice):
if i<label_indice.size()[0]-1:
tmp_count = (label_indice[i]+label_indice[i+1])/2
else:
tmp_count = label_indice[i]
label2count.append(tmp_count)
label2count = torch.tensor(label2count)
label2count = label2count.type(torch.FloatTensor)
#outputs = outputs.max(dim=1)[1].cpu().data
ORI_SIZE = pre_cls.size()
pre_cls = pre_cls.reshape(-1).cpu()
pre_counts = torch.index_select(label2count,0,pre_cls.cpu().type(torch.LongTensor))
pre_counts = pre_counts.reshape(ORI_SIZE)
if IF_ret_gpu:
pre_counts = pre_counts.cuda()
return pre_counts
if __name__ == '__main__':
pre_cls = torch.Tensor([[0,1,2],[3,4,4]])
label_indice =torch.Tensor([0.5,1,1.5,2])
pre_counts = Class2Count(pre_cls,label_indice)
print(pre_cls)
print(label_indice)
print(pre_counts)
pre_cls = Count2Class(pre_counts,label_indice)
print(pre_cls) |
test/fixtures/python/analysis/main1.py | matsubara0507/semantic | 8,844 | 14464 | import a as b
import b.c as e
b.foo(1)
e.baz(1)
|
tests/test_markdown_in_code_cells.py | st--/jupytext | 5,378 | 14469 | <filename>tests/test_markdown_in_code_cells.py
"""Issue #712"""
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import reads, writes
from jupytext.cell_to_text import three_backticks_or_more
from jupytext.compare import compare, compare_notebooks
from .utils import requires_myst
def test_three_backticks_or_more():
assert three_backticks_or_more([""]) == "```"
assert three_backticks_or_more(["``"]) == "```"
assert three_backticks_or_more(["```python"]) == "````"
assert three_backticks_or_more(["```"]) == "````"
assert three_backticks_or_more(["`````python"]) == "``````"
assert three_backticks_or_more(["`````"]) == "``````"
def test_triple_backticks_in_code_cell(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell(
'''a = """
```
foo
```
"""'''
)
],
),
text='''---
jupyter:
jupytext:
main_language: python
---
````python
a = """
```
foo
```
"""
````
''',
):
actual_text = writes(nb, fmt="md")
compare(actual_text, text)
actual_nb = reads(text, fmt="md")
compare_notebooks(actual_nb, nb)
@requires_myst
def test_triple_backticks_in_code_cell_myst(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell(
'''a = """
```
foo
```
"""'''
)
],
),
text='''---
jupytext:
main_language: python
---
````{code-cell}
a = """
```
foo
```
"""
````
''',
):
actual_text = writes(nb, fmt="md:myst")
compare(actual_text, text)
actual_nb = reads(text, fmt="md:myst")
compare_notebooks(actual_nb, nb)
def test_alternate_three_four_five_backticks(
no_jupytext_version_number,
nb=new_notebook(
metadata={"main_language": "python"},
cells=[
new_code_cell('a = """\n```\n"""'),
new_code_cell("b = 2"),
new_code_cell('c = """\n````\n"""'),
],
),
text='''---
jupyter:
jupytext:
main_language: python
---
````python
a = """
```
"""
````
```python
b = 2
```
`````python
c = """
````
"""
`````
''',
):
actual_text = writes(nb, fmt="md")
compare(actual_text, text)
actual_nb = reads(text, fmt="md")
compare_notebooks(actual_nb, nb)
|
example/cifar10/fast_at.py | KuanKuanQAQ/ares | 206 | 14471 | <reponame>KuanKuanQAQ/ares
''' This file provides a wrapper class for Fast_AT (https://github.com/locuslab/fast_adversarial) model for CIFAR-10 dataset. '''
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import tensorflow as tf
from ares.model.pytorch_wrapper import pytorch_classifier_with_logits
from ares.utils import get_res_path
MODEL_PATH = get_res_path('./cifar10/cifar_model_weights_30_epochs.pth')
def load(_):
model = Fast_AT()
model.load()
return model
@pytorch_classifier_with_logits(n_class=10, x_min=0.0, x_max=1.0,
x_shape=(32, 32, 3), x_dtype=tf.float32, y_dtype=tf.int32)
class Fast_AT(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.model = PreActResNet18().cuda()
self._mean_torch = torch.tensor((0.4914, 0.4822, 0.4465)).view(3,1,1).cuda()
self._std_torch = torch.tensor((0.2471, 0.2435, 0.2616)).view(3,1,1).cuda()
def forward(self, x):
x = x.transpose(1, 2).transpose(1, 3).contiguous()
input_var = (x.cuda() - self._mean_torch) / self._std_torch
labels = self.model(input_var)
return labels.cpu()
def load(self):
checkpoint = torch.load(MODEL_PATH)
self.model.load_state_dict(checkpoint)
self.model.float()
self.model.eval()
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.bn = nn.BatchNorm2d(512 * block.expansion)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PreActResNet18():
return PreActResNet(PreActBlock, [2,2,2,2])
if __name__ == '__main__':
if not os.path.exists(MODEL_PATH):
if not os.path.exists(os.path.dirname(MODEL_PATH)):
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
url = 'https://drive.google.com/file/d/1XM-v4hqi9u8EDrQ2xdCo37XXcM9q-R07/view'
print('Please download "{}" to "{}".'.format(url, MODEL_PATH))
|
mayan/apps/linking/tests/test_smart_link_condition_views.py | atitaya1412/Mayan-EDMS | 343 | 14474 | <gh_stars>100-1000
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..events import event_smart_link_edited
from ..permissions import permission_smart_link_edit
from .mixins import (
SmartLinkConditionViewTestMixin, SmartLinkTestMixin,
SmartLinkViewTestMixin
)
class SmartLinkConditionViewTestCase(
SmartLinkConditionViewTestMixin, SmartLinkTestMixin,
SmartLinkViewTestMixin, GenericViewTestCase
):
def setUp(self):
super().setUp()
self._create_test_smart_link()
def test_smart_link_condition_create_view_no_permission(self):
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_create_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_create_view_with_access(self):
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count + 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_smart_link_condition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_delete_view_no_permission(self):
self._create_test_smart_link_condition()
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_delete_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_delete_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
condition_count = self.test_smart_link.conditions.count()
self._clear_events()
response = self._request_test_smart_link_condition_delete_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_smart_link.conditions.count(), condition_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_edit_view_no_permission(self):
self._create_test_smart_link_condition()
instance_values = self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
)
self._clear_events()
response = self._request_test_smart_link_condition_edit_view()
self.assertEqual(response.status_code, 404)
self.test_smart_link_condition.refresh_from_db()
self.assertEqual(
self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
), instance_values
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_edit_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
instance_values = self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
)
self._clear_events()
response = self._request_test_smart_link_condition_edit_view()
self.assertEqual(response.status_code, 302)
self.test_smart_link_condition.refresh_from_db()
self.assertNotEqual(
self._model_instance_to_dictionary(
instance=self.test_smart_link_condition
), instance_values
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(
events[0].action_object, self.test_smart_link_condition
)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_smart_link)
self.assertEqual(events[0].verb, event_smart_link_edited.id)
def test_smart_link_condition_list_view_no_permission(self):
self._create_test_smart_link_condition()
self._clear_events()
response = self._request_test_smart_link_condition_list_view()
self.assertNotContains(
response=response, status_code=404,
text=self.test_smart_link_condition.smart_link.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_smart_link_condition_list_view_with_access(self):
self._create_test_smart_link_condition()
self.grant_access(
obj=self.test_smart_link, permission=permission_smart_link_edit
)
self._clear_events()
response = self._request_test_smart_link_condition_list_view()
self.assertContains(
response=response, status_code=200,
text=self.test_smart_link_condition.smart_link.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
|
notebooks/container/__init__.py | DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020 | 114 | 14476 | <gh_stars>100-1000
from container.base import TimeBase
from container.array import TimeArray, TimeDtype
from container.timeseries import TimeSeries
from container.timeframe import TimeFrame |
bh_tsne/prep_result.py | mr4jay/numerai | 306 | 14502 | import struct
import numpy as np
import pandas as pd
df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')
with open('result.dat', 'rb') as f:
N, = struct.unpack('i', f.read(4))
no_dims, = struct.unpack('i', f.read(4))
print(N, no_dims)
mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
mappedX = np.array(mappedX).reshape((N, no_dims))
print(mappedX)
tsne_train = mappedX[:len(df_train)]
tsne_valid = mappedX[len(df_train):len(df_train)+len(df_valid)]
tsne_test = mappedX[len(df_train)+len(df_valid):]
assert(len(tsne_train) == len(df_train))
assert(len(tsne_valid) == len(df_valid))
assert(len(tsne_test) == len(df_test))
save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
print('Saved: {}'.format(save_path))
# landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
# costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
|
tools_d2/convert-pretrain-model-to-d2.py | nguyentritai2906/panoptic-deeplab | 506 | 14526 | <filename>tools_d2/convert-pretrain-model-to-d2.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download your pretrained model:
wget https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/tf-xception65-270e81cf.pth -O x65.pth
# run the conversion
./convert-pretrained-model-to-d2.py x65.pth x65.pkl
# Then, use x65.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/x65.pkl"
PIXEL_MEAN: [128, 128, 128]
PIXEL_STD: [128, 128, 128]
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
res = {"model": obj, "__author__": "third_party", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
|
pyActionRec/action_flow.py | Xiatian-Zhu/anet2016_cuhk | 253 | 14531 | <reponame>Xiatian-Zhu/anet2016_cuhk
from config import ANET_CFG
import sys
sys.path.append(ANET_CFG.DENSE_FLOW_ROOT+'/build')
from libpydenseflow import TVL1FlowExtractor
import action_caffe
import numpy as np
class FlowExtractor(object):
def __init__(self, dev_id, bound=20):
TVL1FlowExtractor.set_device(dev_id)
self._et = TVL1FlowExtractor(bound)
def extract_flow(self, frame_list, new_size=None):
"""
This function extracts the optical flow and interleave x and y channels
:param frame_list:
:return:
"""
frame_size = frame_list[0].shape[:2]
rst = self._et.extract_flow([x.tostring() for x in frame_list], frame_size[1], frame_size[0])
n_out = len(rst)
if new_size is None:
ret = np.zeros((n_out*2, frame_size[0], frame_size[1]))
for i in xrange(n_out):
ret[2*i, :] = np.fromstring(rst[i][0], dtype='uint8').reshape(frame_size)
ret[2*i+1, :] = np.fromstring(rst[i][1], dtype='uint8').reshape(frame_size)
else:
import cv2
ret = np.zeros((n_out*2, new_size[1], new_size[0]))
for i in xrange(n_out):
ret[2*i, :] = cv2.resize(np.fromstring(rst[i][0], dtype='uint8').reshape(frame_size), new_size)
ret[2*i+1, :] = cv2.resize(np.fromstring(rst[i][1], dtype='uint8').reshape(frame_size), new_size)
return ret
if __name__ == "__main__":
import cv2
im1 = cv2.imread('../data/img_1.jpg')
im2 = cv2.imread('../data/img_2.jpg')
f = FlowExtractor(0)
flow_frames = f.extract_flow([im1, im2])
from pylab import *
plt.figure()
plt.imshow(flow_frames[0])
plt.figure()
plt.imshow(flow_frames[1])
plt.figure()
plt.imshow(im1)
plt.show()
print flow_frames
|
apps/auth/views/wxlogin.py | rainydaygit/testtcloudserver | 349 | 14534 | from flask import Blueprint
from apps.auth.business.wxlogin import WxLoginBusiness
from apps.auth.extentions import validation, parse_json_form
from library.api.render import json_detail_render
wxlogin = Blueprint("wxlogin", __name__)
@wxlogin.route('/', methods=['POST'])
@validation('POST:wx_user_code')
def wxuser_index_handler():
"""
    @api {post} /v1/wxlogin/ WeChat login
    @apiName WxLogin
    @apiGroup User
    @apiDescription Log in via WeChat
    @apiParam {string} user_code user code
@apiParamExample {json} Request-Example:
{
"user_code":"j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"token": "<PASSWORD>"
},
"message": ""
}
"""
user_code = parse_json_form('wx_user_code')
ret, data, msg = WxLoginBusiness.get_user(user_code[0])
return json_detail_render(ret, data, msg)
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py | ciskoinch8/vimrc | 463 | 14558 | """
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens, unnecessary-comprehension
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
|
search_for_similar_images__perceptual_hash__phash/ui/SelectDirBox.py | DazEB2/SimplePyScripts | 117 | 14561 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/gil9red/VideoStreamingWithEncryption/blob/37cf7f501460a286ec44a20db7b2403e8cb05d97/server_GUI_Qt/inner_libs/gui/SelectDirBox.py
import os
from PyQt5.QtWidgets import QWidget, QLineEdit, QLabel, QPushButton, QHBoxLayout, QFileDialog, QStyle
from PyQt5.QtCore import pyqtSignal
class SelectDirBox(QWidget):
valueChanged = pyqtSignal(str)
valueEdited = pyqtSignal(str)
def __init__(self, value='', visible_label=True):
super().__init__()
self._label = QLabel('Directory:')
self._label.setVisible(visible_label)
self._value = QLineEdit()
self._value.textChanged.connect(self.valueChanged.emit)
self._value.textEdited.connect(self.valueEdited.emit)
icon_open_dir = self.style().standardIcon(QStyle.SP_DirOpenIcon)
action_open_dir = self._value.addAction(icon_open_dir, QLineEdit.TrailingPosition)
action_open_dir.setToolTip('Open directory')
action_open_dir.triggered.connect(self._on_open_dir)
self._button_select_path = QPushButton('...')
self._button_select_path.setFixedWidth(24)
self._button_select_path.setToolTip('Select directory')
self._button_select_path.clicked.connect(self._on_select_path)
self.setValue(value)
layout = QHBoxLayout()
layout.setSpacing(5)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._label)
layout.addWidget(self._value, stretch=1)
layout.addWidget(self._button_select_path)
self.setLayout(layout)
def setValue(self, value: str):
self._value.setText(value)
self._value.setToolTip(value)
def getValue(self) -> str:
return self._value.text()
def _on_select_path(self):
path = QFileDialog.getExistingDirectory(self, None, self._value.text())
if not path:
return
self.setValue(path)
def _on_open_dir(self):
path = self._value.text()
if os.path.isdir(path):
os.startfile(path)
def resizeEvent(self, event):
super().resizeEvent(event)
self._button_select_path.setFixedHeight(self._value.height())
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication
app = QApplication([])
mw = SelectDirBox()
mw.valueChanged.connect(
lambda value: print(f'Selected directory: {value}')
)
mw.show()
app.exec()
|
code/nn/optimization.py | serced/rcnn | 372 | 14573 | '''
This file implements various optimization methods, including
-- SGD with gradient norm clipping
-- AdaGrad
-- AdaDelta
-- Adam
Transparent to switch between CPU / GPU.
@author: <NAME> (<EMAIL>)
'''
import random
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import HostFromGpu
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
from theano.printing import debugprint
from .initialization import default_mrng
def create_optimization_updates(
cost, params, method="sgd",
max_norm=5, updates=None, gradients=None,
lr=0.01, eps=None, rho=0.99, gamma=0.999,
beta1=0.9, beta2=0.999, momentum=0.0):
_momentum = momentum
lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
beta1 = theano.shared(np.float64(beta1).astype(theano.config.floatX))
beta2 = theano.shared(np.float64(beta2).astype(theano.config.floatX))
momentum = theano.shared(np.float64(momentum).astype(theano.config.floatX))
gamma = theano.shared(np.float64(gamma).astype(theano.config.floatX))
if eps is None:
eps = 1e-8 if method.lower() != "esgd" else 1e-4
eps = np.float64(eps).astype(theano.config.floatX)
gparams = T.grad(cost, params) if gradients is None else gradients
g_norm = 0
for g in gparams:
g_norm = g_norm + g.norm(2)**2
g_norm = T.sqrt(g_norm)
# max_norm is useful for sgd
if method != "sgd": max_norm = None
if max_norm is not None and max_norm is not False:
max_norm = theano.shared(np.float64(max_norm).astype(theano.config.floatX))
shrink_factor = T.minimum(max_norm, g_norm + eps) / (g_norm + eps)
gparams_clipped = [ ]
for g in gparams:
g = shrink_factor * g
gparams_clipped.append(g)
gparams = gparams_clipped
if updates is None:
updates = OrderedDict()
gsums = create_accumulators(params) if method != "sgd" or _momentum > 0.0 else \
[ None for p in params ]
xsums = create_accumulators(params) if method != "sgd" and method != "adagrad" else None
if method == "sgd":
create_sgd_updates(updates, params, gparams, gsums, lr, momentum)
elif method == "adagrad":
create_adagrad_updates(updates, params, gparams, gsums, lr, eps)
elif method == "adadelta":
create_adadelta_updates(updates, params, gparams, gsums, xsums, lr, eps, rho)
elif method == "adam":
create_adam_updates(updates, params, gparams, gsums, xsums, lr, eps, beta1, beta2)
elif method == "esgd":
create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum)
else:
raise Exception("Unknown optim method: {}\n".format(method))
if method == "adadelta":
lr = rho
return updates, lr, g_norm, gsums, xsums, max_norm
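# Usage sketch (illustrative only; assumes `cost`, `params` and the symbolic
# inputs `x`, `y` are defined elsewhere by the model):
#
#     updates, lr, g_norm, gsums, xsums, max_norm = create_optimization_updates(
#             cost, params, method="adam", lr=0.001)
#     train_fn = theano.function(inputs=[x, y], outputs=cost, updates=updates)
#
# `lr` (and the accumulators) are returned as shared variables, so they can be
# inspected or adjusted during training, e.g. for learning-rate decay.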
def is_subtensor_op(p):
if hasattr(p, 'owner') and hasattr(p.owner, 'op'):
return isinstance(p.owner.op, T.AdvancedSubtensor1) or \
isinstance(p.owner.op, T.Subtensor)
return False
def get_subtensor_op_inputs(p):
origin, indexes = p.owner.inputs
if hasattr(origin, 'owner') and hasattr(origin.owner, 'op') and \
isinstance(origin.owner.op, HostFromGpu):
origin = origin.owner.inputs[0]
assert isinstance(origin, CudaNdarraySharedVariable)
return origin, indexes
def get_similar_subtensor(matrix, indexes, param_op):
'''
    So far, only two possible subtensor operations are used.
'''
if isinstance(param_op.owner.op, T.AdvancedSubtensor1):
return matrix[indexes]
else:
# indexes is start index in this case
return matrix[indexes:]
def create_accumulators(params):
accums = [ ]
for p in params:
if is_subtensor_op(p):
origin, _ = get_subtensor_op_inputs(p)
acc = theano.shared(np.zeros_like(origin.get_value(borrow=True), \
dtype=theano.config.floatX))
else:
acc = theano.shared(np.zeros_like(p.get_value(borrow=True), \
dtype=theano.config.floatX))
accums.append(acc)
return accums
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
has_momentum = momentum.get_value() > 0.0
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
if has_momentum:
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices*momentum + g
updates[acc] = T.set_subtensor(acc_slices, new_acc)
else:
new_acc = g
updates[origin] = T.inc_subtensor(p, - lr * new_acc)
else:
if has_momentum:
new_acc = acc*momentum + g
updates[acc] = new_acc
else:
new_acc = g
updates[p] = p - lr * new_acc
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
#acc_slices = acc[indexes]
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices + g**2
updates[acc] = T.set_subtensor(acc_slices, new_acc)
updates[origin] = T.inc_subtensor(p, \
- lr * (g / T.sqrt(new_acc + eps)))
else:
new_acc = acc + g**2
updates[acc] = new_acc
updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
#updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
# which one to use?
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
lr, eps, rho):
for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
gacc_slices = gacc[indexes]
xacc_slices = xacc[indexes]
new_gacc = rho * gacc_slices + (1.0-rho) * g**2
d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc_slices + (1.0-rho) * d**2
updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
updates[origin] = T.inc_subtensor(p, d)
else:
new_gacc = rho * gacc + (1.0-rho) * g**2
d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc + (1.0-rho) * d**2
updates[gacc] = new_gacc
updates[xacc] = new_xacc
updates[p] = p + d
def create_adam_updates(updates, params, gparams, gsums, xsums, \
lr, eps, beta1, beta2):
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omb1_t = 1.0 - beta1**i_t
omb2_t = 1.0 - beta2**i_t
lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
for p, g, m, v in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
m_sub = m[indexes]
v_sub = v[indexes]
m_t = beta1*m_sub + (1.0-beta1)*g
v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = T.set_subtensor(m_sub, m_t)
updates[v] = T.set_subtensor(v_sub, v_t)
updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
else:
m_t = beta1*m + (1.0-beta1)*g
v_t = beta2*v + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = m_t
updates[v] = v_t
updates[p] = p - lr_t*g_t
updates[i] = i_t
def create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum):
has_momentum = momentum.get_value() > 0.0
samples = [ default_mrng.normal(size=p.shape, avg=0, std=1,
dtype=theano.config.floatX) for p in params ]
HVs = T.Lop(gparams, params, samples)
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omg_t = 1.0 - gamma**i_t
for p, g, m, D, Hv in zip(params, gparams, gsums, xsums, HVs):
if is_subtensor_op(p):
raise Exception("ESGD subtensor update not implemented!")
else:
D_t = D * gamma + T.sqr(Hv) * (1.0-gamma)
if has_momentum:
m_t = m*momentum + g
updates[m] = m_t
else:
m_t = g
g_t = m_t / ( T.sqrt(D_t/omg_t + eps) )
#g_t = m_t / ( T.sqrt(D_t + eps) )
updates[D] = D_t
updates[p] = p - lr*g_t
updates[i] = i_t
|
deep-rl/lib/python2.7/site-packages/OpenGL/GLES2/vboimplementation.py | ShujaKhalid/deep-rl | 210 | 14583 | from OpenGL.arrays import vbo
from OpenGL.GLES2.VERSION import GLES2_2_0
from OpenGL.GLES2.OES import mapbuffer
class Implementation( vbo.Implementation ):
"""OpenGL-based implementation of VBO interfaces"""
def __init__( self ):
        for name in self.EXPORTED_NAMES:
            found = False  # ensure the assert below fails cleanly if no source provides the name
            for source in [ GLES2_2_0, mapbuffer ]:
for possible in (name,name+'OES'):
try:
setattr( self, name, getattr( source, possible ))
except AttributeError as err:
pass
else:
found = True
assert found, name
if GLES2_2_0.glBufferData:
self.available = True
Implementation.register()
|
eval.py | CLT29/pvse | 119 | 14595 | from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
"""Encode all images and sentences loadable by data_loader"""
# switch to evaluate mode
model.eval()
use_mil = model.module.mil if hasattr(model, 'module') else model.mil
# numpy array to keep all the embeddings
img_embs, txt_embs = None, None
for i, data in enumerate(data_loader):
img, txt, txt_len, ids = data
if torch.cuda.is_available():
img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
# compute the embeddings
img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
del img, txt, txt_len
# initialize the output embeddings
if img_embs is None:
if use_gpu:
emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
if use_mil else [len(data_loader.dataset), img_emb.size(1)]
img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
else:
emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
if use_mil else (len(data_loader.dataset), img_emb.size(1))
img_embs = np.zeros(emb_sz)
txt_embs = np.zeros(emb_sz)
# preserve the embeddings by copying from gpu and converting to numpy
img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
return img_embs, txt_embs
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Images->Text (Image Annotation)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
index_list = []
ranks, top1 = np.zeros(npts), np.zeros(npts)
for index in range(npts):
# Get query image
im = images[nreps * index]
im = im.reshape((1,) + im.shape)
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = im.mm(sentences.t()).view(-1)
else:
_, K, D = im.shape
sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
sim, _ = sim_kk.max(dim=1)
sim = sim.flatten()
else:
if order:
if index % ORDER_BATCH_SIZE == 0:
mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
im2 = images[nreps * index:mx:nreps]
sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[index % ORDER_BATCH_SIZE]
else:
sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
if use_gpu:
_, inds_gpu = sim.sort()
inds = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds = np.argsort(sim)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(nreps * index, nreps * (index + 1), 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
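# Note on the metrics above (for reference): r1/r5/r10 are Recall@K, i.e. the
# percentage of query images whose best-ranked ground-truth caption appears in
# the top K retrieved sentences; medr/meanr are the median and mean rank (1-based).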
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Text->Images (Image Search)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
if use_gpu:
ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
else:
ims = np.array([images[i] for i in range(0, len(images), nreps)])
ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
for index in range(npts):
# Get query sentences
queries = sentences[nreps * index:nreps * (index + 1)]
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = queries.mm(ims.t())
else:
sim_kk = queries.view(-1, queries.size(-1)).mm(ims.view(-1, ims.size(-1)).t())
sim_kk = sim_kk.view(queries.size(0), queries.size(1), ims.size(0), ims.size(1))
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(queries.size(0), -1, ims.size(0))
sim, _ = sim_kk.max(dim=1)
else:
if order:
if nreps * index % ORDER_BATCH_SIZE == 0:
mx = min(sentences.shape[0], nreps * index + ORDER_BATCH_SIZE)
sentences_batch = sentences[nreps * index:mx]
sim_batch = order_sim(torch.Tensor(images).cuda(),
torch.Tensor(sentences_batch).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[:, (nreps * index) % ORDER_BATCH_SIZE:(nreps * index) % ORDER_BATCH_SIZE + nreps].T
else:
sim = np.tensordot(queries, ims, axes=[2, 2]).max(axis=(1,3)) \
if len(sentences.shape) == 3 else np.dot(queries, ims.T)
inds = np.zeros(sim.shape)
for i in range(len(inds)):
if use_gpu:
_, inds_gpu = sim[i].sort()
inds[i] = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds[i] = np.argsort(sim[i])[::-1]
ranks[nreps * index + i] = np.where(inds[i] == index)[0][0]
top1[nreps * index + i] = inds[i][0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def convert_old_state_dict(x, model, multi_gpu=False):
params = model.state_dict()
prefix = ['module.img_enc.', 'module.txt_enc.'] \
if multi_gpu else ['img_enc.', 'txt_enc.']
for i, old_params in enumerate(x):
for key, val in old_params.items():
key = prefix[i] + key.replace('module.','').replace('our_model', 'pie_net')
assert key in params, '{} not found in model state_dict'.format(key)
params[key] = val
return params
def evalrank(model, args, split='test'):
print('Loading dataset')
data_loader = get_test_loader(args, vocab)
print('Computing results... (eval_on_gpu={})'.format(args.eval_on_gpu))
img_embs, txt_embs = encode_data(model, data_loader, args.eval_on_gpu)
n_samples = img_embs.shape[0]
nreps = 5 if args.data_name == 'coco' else 1
print('Images: %d, Sentences: %d' % (img_embs.shape[0] / nreps, txt_embs.shape[0]))
# 5fold cross-validation, only for MSCOCO
mean_metrics = None
if args.data_name == 'coco':
results = []
for i in range(5):
r, rt0 = i2t(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
print("Image to text: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % r)
ri, rti0 = t2i(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
if i == 0:
rt, rti = rt0, rti0
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("Text to image: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % ri)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.2f ar: %.2f ari: %.2f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("-----------------------------------")
print("Mean metrics from 5-fold evaluation: ")
print("rsum: %.2f" % (mean_metrics[-1] * 6))
print("Average i2t Recall: %.2f" % mean_metrics[-3])
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[:7])
print("Average t2i Recall: %.2f" % mean_metrics[-2])
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[7:14])
# no cross-validation, full evaluation
r, rt = i2t(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ri, rti = t2i(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("rsum: %.2f" % rsum)
print("Average i2t Recall: %.2f" % ar)
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % r)
print("Average t2i Recall: %.2f" % ari)
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % ri)
return mean_metrics
if __name__ == '__main__':
multi_gpu = torch.cuda.device_count() > 1
args = verify_input_args(parser.parse_args())
opt = verify_input_args(parser.parse_args())
# load vocabulary used by the model
with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
vocab = pickle.load(f)
args.vocab_size = len(vocab)
# load model and options
assert os.path.isfile(args.ckpt)
model = PVSE(vocab.word2idx, args)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
torch.backends.cudnn.benchmark = True
model.load_state_dict(torch.load(args.ckpt))
# evaluate
metrics = evalrank(model, args, split='test')
|
paddlex/ppdet/modeling/heads/detr_head.py | xiaolao/PaddleX | 3,655 | 14596 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
import pycocotools.mask as mask_util
from ..initializer import linear_init_, constant_
from ..transformers.utils import inverse_sigmoid
__all__ = ['DETRHead', 'DeformableDETRHead']
class MLP(nn.Layer):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.LayerList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
self._reset_parameters()
def _reset_parameters(self):
for l in self.layers:
linear_init_(l)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class MultiHeadAttentionMap(nn.Layer):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant()) if bias else False
self.q_proj = nn.Linear(query_dim, hidden_dim, weight_attr, bias_attr)
self.k_proj = nn.Conv2D(
query_dim,
hidden_dim,
1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_proj(q)
k = self.k_proj(k)
bs, num_queries, n, c, h, w = q.shape[0], q.shape[1], self.num_heads,\
self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]
qh = q.reshape([bs, num_queries, n, c])
kh = k.reshape([bs, n, c, h, w])
# weights = paddle.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
qh = qh.transpose([0, 2, 1, 3]).reshape([-1, num_queries, c])
kh = kh.reshape([-1, c, h * w])
weights = paddle.bmm(qh * self.normalize_fact, kh).reshape(
[bs, n, num_queries, h, w]).transpose([0, 2, 1, 3, 4])
if mask is not None:
weights += mask
        # fix a potential bug: https://github.com/facebookresearch/detr/issues/247
weights = F.softmax(weights.flatten(3), axis=-1).reshape(weights.shape)
weights = self.dropout(weights)
return weights
class MaskHeadFPNConv(nn.Layer):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, input_dim, fpn_dims, context_dim, num_groups=8):
super().__init__()
inter_dims = [input_dim,
] + [context_dim // (2**i) for i in range(1, 5)]
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.KaimingUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant())
self.conv0 = self._make_layers(input_dim, input_dim, 3, num_groups,
weight_attr, bias_attr)
self.conv_inter = nn.LayerList()
for in_dims, out_dims in zip(inter_dims[:-1], inter_dims[1:]):
self.conv_inter.append(
self._make_layers(in_dims, out_dims, 3, num_groups,
weight_attr, bias_attr))
self.conv_out = nn.Conv2D(
inter_dims[-1],
1,
3,
padding=1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.adapter = nn.LayerList()
for i in range(len(fpn_dims)):
self.adapter.append(
nn.Conv2D(
fpn_dims[i],
inter_dims[i + 1],
1,
weight_attr=weight_attr,
bias_attr=bias_attr))
def _make_layers(self,
in_dims,
out_dims,
kernel_size,
num_groups,
weight_attr=None,
bias_attr=None):
return nn.Sequential(
nn.Conv2D(
in_dims,
out_dims,
kernel_size,
padding=kernel_size // 2,
weight_attr=weight_attr,
bias_attr=bias_attr),
nn.GroupNorm(num_groups, out_dims),
nn.ReLU())
def forward(self, x, bbox_attention_map, fpns):
x = paddle.concat([
x.tile([bbox_attention_map.shape[1], 1, 1, 1]),
bbox_attention_map.flatten(0, 1)
], 1)
x = self.conv0(x)
for inter_layer, adapter_layer, feat in zip(self.conv_inter[:-1],
self.adapter, fpns):
feat = adapter_layer(feat).tile(
[bbox_attention_map.shape[1], 1, 1, 1])
x = inter_layer(x)
x = feat + F.interpolate(x, size=feat.shape[-2:])
x = self.conv_inter[-1](x)
x = self.conv_out(x)
return x
@register
class DETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim', 'use_focal_loss']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=256,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss',
fpn_dims=[1024, 512, 256],
with_mask_head=False,
use_focal_loss=False):
super(DETRHead, self).__init__()
# add background class
self.num_classes = num_classes if use_focal_loss else num_classes + 1
self.hidden_dim = hidden_dim
self.loss = loss
self.with_mask_head = with_mask_head
self.use_focal_loss = use_focal_loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
if self.with_mask_head:
self.bbox_attention = MultiHeadAttentionMap(hidden_dim, hidden_dim,
nhead)
self.mask_head = MaskHeadFPNConv(hidden_dim + nhead, fpn_dims,
hidden_dim)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {
'hidden_dim': hidden_dim,
'nhead': nhead,
'fpn_dims': [i.channels for i in input_shape[::-1]][1:]
}
@staticmethod
def get_gt_mask_from_polygons(gt_poly, pad_mask):
out_gt_mask = []
for polygons, padding in zip(gt_poly, pad_mask):
height, width = int(padding[:, 0].sum()), int(padding[0, :].sum())
masks = []
for obj_poly in polygons:
rles = mask_util.frPyObjects(obj_poly, height, width)
rle = mask_util.merge(rles)
masks.append(
paddle.to_tensor(mask_util.decode(rle)).astype('float32'))
masks = paddle.stack(masks)
masks_pad = paddle.zeros(
[masks.shape[0], pad_mask.shape[1], pad_mask.shape[2]])
masks_pad[:, :height, :width] = masks
out_gt_mask.append(masks_pad)
return out_gt_mask
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size, hidden_dim, h, w],
src_proj: [batch_size, h*w, hidden_dim],
src_mask: [batch_size, 1, 1, h, w])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, src_proj, src_mask = out_transformer
outputs_logit = self.score_head(feats)
outputs_bbox = F.sigmoid(self.bbox_head(feats))
outputs_seg = None
if self.with_mask_head:
bbox_attention_map = self.bbox_attention(feats[-1], memory,
src_mask)
fpn_feats = [a for a in body_feats[::-1]][1:]
outputs_seg = self.mask_head(src_proj, bbox_attention_map,
fpn_feats)
outputs_seg = outputs_seg.reshape([
feats.shape[1], feats.shape[2], outputs_seg.shape[-2],
outputs_seg.shape[-1]
])
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
gt_mask = self.get_gt_mask_from_polygons(
inputs['gt_poly'],
inputs['pad_mask']) if 'gt_poly' in inputs else None
return self.loss(
outputs_bbox,
outputs_logit,
inputs['gt_bbox'],
inputs['gt_class'],
masks=outputs_seg,
gt_mask=gt_mask)
else:
return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
@register
class DeformableDETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=512,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss'):
super(DeformableDETRHead, self).__init__()
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.nhead = nhead
self.loss = loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
constant_(self.score_head.bias, -4.595)
constant_(self.bbox_head.layers[-1].weight)
bias = paddle.zeros_like(self.bbox_head.layers[-1].bias)
bias[2:] = -2.0
self.bbox_head.layers[-1].bias.set_value(bias)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {'hidden_dim': hidden_dim, 'nhead': nhead}
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size,
\sum_{l=0}^{L-1} H_l \cdot W_l, hidden_dim],
reference_points: [batch_size, num_queries, 2])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, reference_points = out_transformer
reference_points = inverse_sigmoid(reference_points.unsqueeze(0))
outputs_bbox = self.bbox_head(feats)
# It's equivalent to "outputs_bbox[:, :, :, :2] += reference_points",
# but the gradient is wrong in paddle.
outputs_bbox = paddle.concat(
[
outputs_bbox[:, :, :, :2] + reference_points,
outputs_bbox[:, :, :, 2:]
],
axis=-1)
outputs_bbox = F.sigmoid(outputs_bbox)
outputs_logit = self.score_head(feats)
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
return self.loss(outputs_bbox, outputs_logit, inputs['gt_bbox'],
inputs['gt_class'])
else:
return (outputs_bbox[-1], outputs_logit[-1], None)
|
mapreduce/handlers.py | igeeker/v2ex | 161 | 14600 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
import datetime
import logging
import math
import os
from mapreduce.lib import simplejson
import time
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import quota
from mapreduce import model
from mapreduce import util
# TODO(user): Make this a product of the reader or in quotas.py
_QUOTA_BATCH_SIZE = 20
# The amount of time to perform scanning in one slice. New slice will be
# scheduled as soon as current one takes this long.
_SLICE_DURATION_SEC = 15
# Delay between consecutive controller callback invocations.
_CONTROLLER_PERIOD_SEC = 2
class Error(Exception):
"""Base class for exceptions in this module."""
class NotEnoughArgumentsError(Error):
"""Required argument is missing."""
class NoDataError(Error):
"""There is no data present for a desired input."""
class MapperWorkerCallbackHandler(base_handler.BaseHandler):
"""Callback handler for mapreduce worker task.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
shard_id: id of the shard.
slice_id: id of the slice.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
self._start_time = self._time()
shard_id = self.shard_id()
# TODO(user): Make this prettier
logging.debug("post: shard=%s slice=%s headers=%s",
shard_id, self.slice_id(), self.request.headers)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not shard_state:
# We're letting this task to die. It's up to controller code to
# reinitialize and restart the task.
logging.error("State not found for shard ID %r; shutting down",
shard_id)
return
if control and control.command == model.MapreduceControl.ABORT:
logging.info("Abort command received by shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_ABORTED
shard_state.put()
model.MapreduceControl.abort(spec.mapreduce_id)
return
input_reader = self.input_reader(spec.mapper)
if spec.mapper.params.get("enable_quota", True):
quota_consumer = quota.QuotaConsumer(
quota.QuotaManager(memcache.Client()),
shard_id,
_QUOTA_BATCH_SIZE)
else:
quota_consumer = None
ctx = context.Context(spec, shard_state)
context.Context._set(ctx)
try:
# consume quota ahead, because we do not want to run a datastore
# query if there's not enough quota for the shard.
if not quota_consumer or quota_consumer.check():
scan_aborted = False
entity = None
# We shouldn't fetch an entity from the reader if there's not enough
# quota to process it. Perform all quota checks proactively.
if not quota_consumer or quota_consumer.consume():
for entity in input_reader:
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
else:
shard_state.last_work_item = repr(entity)[:100]
scan_aborted = not self.process_entity(entity, ctx)
# Check if we've got enough quota for the next entity.
if (quota_consumer and not scan_aborted and
not quota_consumer.consume()):
scan_aborted = True
if scan_aborted:
break
else:
scan_aborted = True
if not scan_aborted:
logging.info("Processing done for shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
# We consumed extra quota item at the end of for loop.
# Just be nice here and give it back :)
if quota_consumer:
quota_consumer.put(1)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_SUCCESS
# TODO(user): Mike said we don't want this happen in case of
# exception while scanning. Figure out when it's appropriate to skip.
ctx.flush()
finally:
context.Context._set(None)
if quota_consumer:
quota_consumer.dispose()
# Rescheduling work should always be the last statement. It shouldn't happen
# if there were any exceptions in code before it.
if shard_state.active:
self.reschedule(spec, input_reader)
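  # Editor's note on the slice lifecycle implemented by post() above: load the
  # shard state, honor any abort command, consume quota per entity, map
  # entities until _SLICE_DURATION_SEC is spent, then reschedule the next
  # slice if the shard is still active.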
def process_entity(self, entity, ctx):
"""Process a single entity.
Call mapper handler on the entity.
Args:
entity: an entity to process.
ctx: current execution context.
Returns:
True if scan should be continued, False if scan should be aborted.
"""
ctx.counters.increment(context.COUNTER_MAPPER_CALLS)
handler = ctx.mapreduce_spec.mapper.handler
if util.is_generator_function(handler):
for result in handler(entity):
if callable(result):
result(ctx)
else:
try:
if len(result) == 2:
logging.error("Collectors not implemented yet")
else:
logging.error("Got bad output tuple of length %d", len(result))
except TypeError:
logging.error(
"Handler yielded type %s, expected a callable or a tuple",
result.__class__.__name__)
else:
handler(entity)
if self._time() - self._start_time > _SLICE_DURATION_SEC:
logging.debug("Spent %s seconds. Rescheduling",
self._time() - self._start_time)
return False
return True
def shard_id(self):
"""Get shard unique identifier of this task from request.
Returns:
shard identifier as string.
"""
return str(self.request.get("shard_id"))
def slice_id(self):
"""Get slice unique identifier of this task from request.
Returns:
slice identifier as int.
"""
return int(self.request.get("slice_id"))
def input_reader(self, mapper_spec):
"""Get the reader from mapper_spec initialized with the request's state.
Args:
mapper_spec: a mapper spec containing the immutable mapper state.
Returns:
An initialized InputReader.
"""
input_reader_spec_dict = simplejson.loads(
self.request.get("input_reader_state"))
return mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
@staticmethod
def worker_parameters(mapreduce_spec,
shard_id,
slice_id,
input_reader):
"""Fill in mapper worker task parameters.
Returned parameters map is to be used as task payload, and it contains
all the data, required by mapper worker to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
shard_id: id of the shard (part of the whole dataset).
slice_id: id of the slice (part of the shard).
input_reader: InputReader containing the remaining inputs for this
shard.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"shard_id": shard_id,
"slice_id": str(slice_id),
"input_reader_state": input_reader.to_json_str()}
@staticmethod
def get_task_name(shard_id, slice_id):
"""Compute single worker task name.
Args:
shard_id: id of the shard (part of the whole dataset) as string.
slice_id: id of the slice (part of the shard) as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrshard-%s-%s" % (shard_id, slice_id)
def reschedule(self, mapreduce_spec, input_reader):
"""Reschedule worker task to continue scanning work.
Args:
mapreduce_spec: mapreduce specification.
input_reader: remaining input reader to process.
"""
MapperWorkerCallbackHandler.schedule_slice(
self.base_path(), mapreduce_spec, self.shard_id(),
self.slice_id() + 1, input_reader)
@classmethod
def schedule_slice(cls,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
base_path: base_path of mapreduce request handlers as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
shard_id: current shard id as string.
slice_id: slice id as int.
input_reader: remaining InputReader for given shard.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
task_params = MapperWorkerCallbackHandler.worker_parameters(
mapreduce_spec, shard_id, slice_id, input_reader)
task_name = MapperWorkerCallbackHandler.get_task_name(shard_id, slice_id)
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
queue_name or "default")
try:
taskqueue.Task(url=base_path + "/worker_callback",
params=task_params,
name=task_name,
eta=eta,
countdown=countdown).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
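# Editor's illustrative sketch (not part of the original module): a minimal,
# standalone version of the time-budgeted slice loop that post() and
# process_entity() implement above. Function and argument names below are
# hypothetical.
def _example_process_with_budget(entities, handle, now, budget_sec=_SLICE_DURATION_SEC):
  """Process entities until the slice budget is spent; False means reschedule."""
  start = now()
  for entity in entities:
    # Handle one entity, then check whether the slice budget is exhausted,
    # mirroring the check at the end of process_entity().
    handle(entity)
    if now() - start > budget_sec:
      return False
  return True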
class ControllerCallbackHandler(base_handler.BaseHandler):
"""Supervises mapreduce execution.
Is also responsible for gathering execution status from shards together.
This task is "continuously" running by adding itself again to taskqueue if
mapreduce is still active.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
# TODO(user): Make this logging prettier.
logging.debug("post: id=%s headers=%s",
spec.mapreduce_id, self.request.headers)
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.error("State not found for mapreduce_id '%s'; skipping",
spec.mapreduce_id)
return
shard_states = model.ShardState.find_by_mapreduce_id(spec.mapreduce_id)
if state.active and len(shard_states) != spec.mapper.shard_count:
# Some shards were lost
logging.error("Incorrect number of shard states: %d vs %d; "
"aborting job '%s'",
len(shard_states), spec.mapper.shard_count,
spec.mapreduce_id)
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
model.MapreduceControl.abort(spec.mapreduce_id)
active_shards = [s for s in shard_states if s.active]
failed_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_FAILED]
aborted_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_ABORTED]
if state.active:
state.active = bool(active_shards)
state.active_shards = len(active_shards)
state.failed_shards = len(failed_shards)
state.aborted_shards = len(aborted_shards)
if (not state.active and control and
control.command == model.MapreduceControl.ABORT):
# User-initiated abort *after* all shards have completed.
logging.info("Abort signal received for job '%s'", spec.mapreduce_id)
state.result_status = model.MapreduceState.RESULT_ABORTED
if not state.active:
state.active_shards = 0
if not state.result_status:
# Set final result status derived from shard states.
if [s for s in shard_states
if s.result_status != model.ShardState.RESULT_SUCCESS]:
state.result_status = model.MapreduceState.RESULT_FAILED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
logging.info("Final result for job '%s' is '%s'",
spec.mapreduce_id, state.result_status)
# We don't need a transaction here, since we change only statistics data,
# and we don't care if it gets overwritten/slightly inconsistent.
self.aggregate_state(state, shard_states)
poll_time = state.last_poll_time
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
if not state.active:
# This is the last execution.
# Enqueue done_callback if needed.
def put_state(state):
state.put()
done_callback = spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
if done_callback:
taskqueue.Task(
url=done_callback,
headers={"Mapreduce-Id": spec.mapreduce_id}).add(
spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE,
"default"),
transactional=True)
db.run_in_transaction(put_state, state)
return
else:
state.put()
processing_rate = int(spec.mapper.params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
self.refill_quotas(poll_time, processing_rate, active_shards)
ControllerCallbackHandler.reschedule(
self.base_path(), spec, self.serial_id() + 1)
def aggregate_state(self, mapreduce_state, shard_states):
"""Update current mapreduce state by aggregating shard states.
Args:
mapreduce_state: current mapreduce state as MapreduceState.
shard_states: all shard states (active and inactive). list of ShardState.
"""
processed_counts = []
mapreduce_state.counters_map.clear()
for shard_state in shard_states:
mapreduce_state.counters_map.add_map(shard_state.counters_map)
processed_counts.append(shard_state.counters_map.get(
context.COUNTER_MAPPER_CALLS))
mapreduce_state.set_processed_counts(processed_counts)
def refill_quotas(self,
last_poll_time,
processing_rate,
active_shard_states):
"""Refill quotas for all active shards.
Args:
last_poll_time: Datetime with the last time the job state was updated.
processing_rate: How many items to process per second overall.
active_shard_states: All active shard states, list of ShardState.
"""
if not active_shard_states:
return
quota_manager = quota.QuotaManager(memcache.Client())
current_time = int(self._time())
last_poll_time = time.mktime(last_poll_time.timetuple())
total_quota_refill = processing_rate * max(0, current_time - last_poll_time)
quota_refill = int(math.ceil(
1.0 * total_quota_refill / len(active_shard_states)))
if not quota_refill:
return
# TODO(user): use batch memcache API to refill quota in one API call.
for shard_state in active_shard_states:
quota_manager.put(shard_state.shard_id, quota_refill)
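    # Editor's note, worked example: with processing_rate=100 items/sec, two
    # seconds elapsed since the last poll and 8 active shards, each shard's
    # quota is topped up by ceil(100 * 2 / 8) = 25 items.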
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
Returned parameters map is to be used as task payload, and it contains
all the data, required by controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls, base_path, mapreduce_spec, serial_id, queue_name=None):
"""Schedule new update status callback task.
Args:
base_path: mapreduce handlers url base path as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
try:
taskqueue.Task(url=base_path + "/controller_callback",
name=task_name, params=task_params,
countdown=_CONTROLLER_PERIOD_SEC).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.BaseHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
input_readers: List of InputReaders objects separated by semi-colons.
"""
def post(self):
"""Handles kick off request."""
spec = model.MapreduceSpec.from_json_str(
self._get_required_param("mapreduce_spec"))
input_readers_json = simplejson.loads(
self._get_required_param("input_readers"))
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
mapper_input_reader_class = spec.mapper.input_reader_class()
input_readers = [mapper_input_reader_class.from_json_str(reader_json)
for reader_json in input_readers_json]
KickOffJobHandler._schedule_shards(
spec, input_readers, queue_name, self.base_path())
ControllerCallbackHandler.reschedule(
self.base_path(), spec, queue_name=queue_name, serial_id=0)
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _schedule_shards(cls, spec, input_readers, queue_name, base_path):
"""Prepares shard states and schedules their execution.
Args:
spec: mapreduce specification as MapreduceSpec.
input_readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
"""
# Note: it's safe to re-attempt this handler because:
# - shard state has deterministic and unique key.
# - schedule_slice will fall back gracefully if a task already exists.
shard_states = []
for shard_number, input_reader in enumerate(input_readers):
shard = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard.shard_description = str(input_reader)
shard_states.append(shard)
    # Retrieves already existing shards.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
    # Put only the shards that do not already exist.
db.put(shard for shard in shard_states
if shard.key() not in existing_shard_keys)
for shard_number, input_reader in enumerate(input_readers):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
MapperWorkerCallbackHandler.schedule_slice(
base_path, spec, shard_id, 0, input_reader, queue_name=queue_name)
class StartJobHandler(base_handler.JsonHandler):
"""Command handler starts a mapreduce job."""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
queue_name = mapper_params["queue_name"] = mapper_params.get(
"queue_name", "default")
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", model._DEFAULT_SHARD_COUNT)))
mapreduce_id = type(self)._start_map(
mapreduce_name,
mapper_spec,
params,
base_path=self.base_path(),
queue_name=queue_name,
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls, name, mapper_spec,
mapreduce_params,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
# Check that handler can be instantiated.
mapper_spec.get_handler()
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_readers = mapper_input_reader_class.split_input(mapper_spec)
if not mapper_input_readers:
raise NoDataError("Found no mapper input readers to process.")
mapper_spec.shard_count = len(mapper_input_readers)
state = model.MapreduceState.create_new()
mapreduce_spec = model.MapreduceSpec(
name,
state.key().id_or_name(),
mapper_spec.to_json(),
mapreduce_params)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = mapper_spec.shard_count
if _app:
state.app_id = _app
# TODO(user): Initialize UI fields correctly.
state.char_url = ""
state.sparkline_url = ""
def schedule_mapreduce(state, mapper_input_readers, eta, countdown):
state.put()
readers_json = [reader.to_json_str() for reader in mapper_input_readers]
taskqueue.Task(
url=base_path + "/kickoffjob_callback",
params={"mapreduce_spec": state.mapreduce_spec.to_json_str(),
"input_readers": simplejson.dumps(readers_json)},
eta=eta, countdown=countdown).add(queue_name, transactional=True)
# Point of no return: We're actually going to run this job!
db.run_in_transaction(
schedule_mapreduce, state, mapper_input_readers, eta, countdown)
return state.key().id_or_name()
class CleanUpJobHandler(base_handler.JsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
# TODO(user): Have this kick off a task to clean up all MapreduceState,
# ShardState, and MapreduceControl entities for a job ID.
self.json_response["status"] = "This does nothing yet."
class AbortJobHandler(base_handler.JsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
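# Editor's note on how these handlers chain together (the URL-to-handler
# mapping itself lives elsewhere in the library): StartJobHandler._start_map()
# enqueues a task to <base_path>/kickoffjob_callback; KickOffJobHandler then
# schedules slice 0 of every shard via MapperWorkerCallbackHandler
# (<base_path>/worker_callback) and starts the controller loop via
# ControllerCallbackHandler (<base_path>/controller_callback), which
# re-enqueues itself every _CONTROLLER_PERIOD_SEC seconds while the job is
# active.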
|
src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py | rhencke/engine | 2,151 | 14605 | <filename>src/third_party/swiftshader/third_party/subzero/pydir/wasm-run-torture-tests.py
#!/usr/bin/env python2
#===- subzero/wasm-run-torture-tests.py - Subzero WASM Torture Test Driver ===//
#
# The Subzero Code Generator
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import glob
import multiprocessing
import os
import Queue
import shutil
import StringIO
import sys
import threading
IGNORED_TESTS = set([
# The remaining tests are known waterfall failures
'20010122-1.c.wasm',
'20031003-1.c.wasm',
'20071018-1.c.wasm',
'20071120-1.c.wasm',
'20071220-1.c.wasm',
'20071220-2.c.wasm',
'20101011-1.c.wasm',
'alloca-1.c.wasm',
'bitfld-3.c.wasm',
'bitfld-5.c.wasm',
'builtin-bitops-1.c.wasm',
'conversion.c.wasm',
'eeprof-1.c.wasm',
'frame-address.c.wasm',
'pr17377.c.wasm',
'pr32244-1.c.wasm',
'pr34971.c.wasm',
'pr36765.c.wasm',
'pr39228.c.wasm',
'pr43008.c.wasm',
'pr47237.c.wasm',
'pr60960.c.wasm',
'va-arg-pack-1.c.wasm',
'20000717-5.c.wasm', # abort() (also works without emcc)
'20001203-2.c.wasm', # assert fail (works without emcc)
'20040811-1.c.wasm', # OOB trap
'20070824-1.c.wasm', # abort() (also works without emcc)
'arith-rand-ll.c.wasm', # abort() (works without emcc)
'arith-rand.c.wasm', # abort() (works without emcc)
'pr23135.c.wasm', # OOB trap (works without emcc)
'pr34415.c.wasm', # (empty output?)
'pr36339.c.wasm', # abort() (works without emcc)
'pr38048-2.c.wasm', # abort() (works without emcc)
'pr42691.c.wasm', # abort() (works without emcc)
'pr43220.c.wasm', # OOB trap (works without emcc)
'pr43269.c.wasm', # abort() (works without emcc)
'vla-dealloc-1.c.wasm', # OOB trap (works without emcc)
'20051012-1.c.wasm', # error reading binary
'921208-2.c.wasm', # error reading binary
'920501-1.c.wasm', # error reading binary
'call-trap-1.c.wasm', # error reading binary
'pr44942.c.wasm', # error reading binary
'920625-1.c.wasm', # abort() (also fails without emcc)
'931004-10.c.wasm', # abort() (also fails without emcc)
'931004-12.c.wasm', # abort() (also fails without emcc)
'931004-14.c.wasm', # abort() (also fails without emcc)
'931004-6.c.wasm', # abort() (also fails without emcc)
'pr38051.c.wasm', # (empty output?) (fails without emcc)
'pr38151.c.wasm', # abort() (fails without emcc)
'pr44575.c.wasm', # abort() (fails without emcc)
'strct-stdarg-1.c.wasm', # abort() (fails without emcc)
'strct-varg-1.c.wasm', # abort() (fails without emcc)
'va-arg-22.c.wasm', # abort() (fails without emcc)
'stdarg-3.c.wasm', # abort() (fails without emcc)
'pr56982.c.wasm', # missing setjmp (wasm.js check did not catch)
'20010605-2.c.wasm', # missing __netf2
'20020413-1.c.wasm', # missing __lttf2
'20030914-1.c.wasm', # missing __floatsitf
'20040709-1.c.wasm', # missing __netf2
'20040709-2.c.wasm', # missing __netf2
'20050121-1.c.wasm', # missing __floatsitf
'20080502-1.c.wasm', # missing __eqtf2
'920501-8.c.wasm', # missing __extenddftf2
'930513-1.c.wasm', # missing __extenddftf2
'930622-2.c.wasm', # missing __floatditf
'960215-1.c.wasm', # missing __addtf3
'960405-1.c.wasm', # missing __eqtf2
'960513-1.c.wasm', # missing __subtf3
'align-2.c.wasm', # missing __eqtf2
'complex-6.c.wasm', # missing __subtf3
'complex-7.c.wasm', # missing __netf2
'pr49218.c.wasm', # missing __fixsfti
'pr54471.c.wasm', # missing __multi3
'regstack-1.c.wasm', # missing __addtf3
'stdarg-1.c.wasm', # missing __netf2
'stdarg-2.c.wasm', # missing __floatsitf
'va-arg-5.c.wasm', # missing __eqtf2
'va-arg-6.c.wasm', # missing __eqtf2
'struct-ret-1.c.wasm', # missing __extenddftf2
])
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--translate-only', action='store_true')
parser.add_argument('tests', nargs='*')
args = parser.parse_args()
OUT_DIR = "./build/wasm-torture"
results_lock = threading.Lock()
compile_count = 0
compile_failures = []
run_count = 0
run_failures = []
def run_test(test_file, verbose=False):
global args
global compile_count
global compile_failures
global results_lock
global run_count
global run_failures
global OUT_DIR
global IGNORED_TESTS
run_test = not args.translate_only
test_name = os.path.basename(test_file)
obj_file = os.path.join(OUT_DIR, test_name + ".o")
exe_file = os.path.join(OUT_DIR, test_name + ".exe")
if not verbose and test_name in IGNORED_TESTS:
print("\033[1;34mSkipping {}\033[1;m".format(test_file))
return
cmd = """LD_LIBRARY_PATH=../../../../v8/out/native/lib.target ./pnacl-sz \
-filetype=obj -target=x8632 {} -threads=0 -O2 \
-verbose=wasm -o {}""".format(test_file, obj_file)
if not verbose:
cmd += " &> /dev/null"
out = StringIO.StringIO()
  out.write(test_file + " ...")
  status = os.system(cmd)
if status != 0:
print('\033[1;31m[compile fail]\033[1;m', file=out)
with results_lock:
compile_failures.append(test_file)
else:
compile_count += 1
# Try to link and run the program.
cmd = "clang -g -m32 {} -o {} " + \
"./runtime/szrt.c ./runtime/wasm-runtime.cpp -lm -lstdc++"
cmd = cmd.format(obj_file, exe_file)
if not run_test or os.system(cmd) == 0:
if not run_test or os.system(exe_file) == 0:
with results_lock:
run_count += 1
print('\033[1;32m[ok]\033[1;m', file=out)
else:
with results_lock:
run_failures.append(test_file)
print('\033[1;33m[run fail]\033[1;m', file=out)
else:
with results_lock:
run_failures.append(test_file)
print('\033[1;33m[run fail]\033[1;m', file=out)
sys.stdout.write(out.getvalue())
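# Editor's note: run_test() above is a three-stage pipeline: pnacl-sz
# translates the .wasm file into an x86-32 object file, clang links it with
# the szrt/wasm runtime sources, and the resulting executable is run; the
# shared counters and failure lists are updated under results_lock because
# run_test() executes concurrently on the worker threads below.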
verbose = args.verbose
if len(args.tests) > 0:
test_files = args.tests
else:
test_files = glob.glob("./emwasm-torture-out/*.wasm")
if os.path.exists(OUT_DIR):
shutil.rmtree(OUT_DIR)
os.mkdir(OUT_DIR)
tasks = Queue.Queue()
def worker():
while True:
run_test(tasks.get(), verbose)
tasks.task_done()
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
for test_file in test_files:
tasks.put(test_file)
tasks.join()
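# tasks.join() blocks until task_done() has been called for every item that
# was put() on the queue; the workers are daemon threads, so they simply die
# when the main thread exits after the summary below is printed.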
if len(compile_failures) > 0:
print()
print("Compilation failures:")
print("=====================\n")
for f in compile_failures:
print(" \033[1;31m" + f + "\033[1;m")
if len(run_failures) > 0:
print()
print("Run failures:")
print("=============\n")
for f in run_failures:
print(" \033[1;33m" + f + "\033[1;m")
print("\n\033[1;32m{}\033[1;m / \033[1;33m{}\033[1;m / {} tests passed"
.format(run_count, compile_count - run_count,
run_count + len(compile_failures) + len(run_failures)))
|
tests/storage/test_filesystem.py | dilyanpalauzov/vdirsyncer | 888 | 14613 | <filename>tests/storage/test_filesystem.py
import subprocess
import aiostream
import pytest
from vdirsyncer.storage.filesystem import FilesystemStorage
from vdirsyncer.vobject import Item
from . import StorageTests
class TestFilesystemStorage(StorageTests):
storage_class = FilesystemStorage
@pytest.fixture
def get_storage_args(self, tmpdir):
async def inner(collection="test"):
rv = {"path": str(tmpdir), "fileext": ".txt", "collection": collection}
if collection is not None:
rv = await self.storage_class.create_collection(**rv)
return rv
return inner
def test_is_not_directory(self, tmpdir):
with pytest.raises(OSError):
f = tmpdir.join("hue")
f.write("stub")
self.storage_class(str(tmpdir) + "/hue", ".txt")
@pytest.mark.asyncio
async def test_broken_data(self, tmpdir):
s = self.storage_class(str(tmpdir), ".txt")
class BrokenItem:
raw = "Ц, Ш, Л, ж, Д, З, Ю".encode()
uid = "jeezus"
ident = uid
with pytest.raises(TypeError):
await s.upload(BrokenItem)
assert not tmpdir.listdir()
@pytest.mark.asyncio
async def test_ident_with_slash(self, tmpdir):
s = self.storage_class(str(tmpdir), ".txt")
await s.upload(Item("UID:a/b/c"))
(item_file,) = tmpdir.listdir()
assert "/" not in item_file.basename and item_file.isfile()
@pytest.mark.asyncio
async def test_ignore_tmp_files(self, tmpdir):
"""Test that files with .tmp suffix beside .ics files are ignored."""
s = self.storage_class(str(tmpdir), ".ics")
await s.upload(Item("UID:xyzxyz"))
(item_file,) = tmpdir.listdir()
item_file.copy(item_file.new(ext="tmp"))
assert len(tmpdir.listdir()) == 2
assert len(await aiostream.stream.list(s.list())) == 1
@pytest.mark.asyncio
async def test_ignore_tmp_files_empty_fileext(self, tmpdir):
"""Test that files with .tmp suffix are ignored with empty fileext."""
s = self.storage_class(str(tmpdir), "")
await s.upload(Item("UID:xyzxyz"))
(item_file,) = tmpdir.listdir()
item_file.copy(item_file.new(ext="tmp"))
assert len(tmpdir.listdir()) == 2
# assert False, tmpdir.listdir() # enable to see the created filename
assert len(await aiostream.stream.list(s.list())) == 1
@pytest.mark.asyncio
async def test_ignore_files_typical_backup(self, tmpdir):
"""Test file-name ignorance with typical backup ending ~."""
ignorext = "~" # without dot
storage = self.storage_class(str(tmpdir), "", fileignoreext=ignorext)
await storage.upload(Item("UID:xyzxyz"))
(item_file,) = tmpdir.listdir()
item_file.copy(item_file.new(basename=item_file.basename + ignorext))
assert len(tmpdir.listdir()) == 2
assert len(await aiostream.stream.list(storage.list())) == 1
@pytest.mark.asyncio
async def test_too_long_uid(self, tmpdir):
storage = self.storage_class(str(tmpdir), ".txt")
item = Item("UID:" + "hue" * 600)
href, etag = await storage.upload(item)
assert item.uid not in href
@pytest.mark.asyncio
async def test_post_hook_inactive(self, tmpdir, monkeypatch):
def check_call_mock(*args, **kwargs):
raise AssertionError()
monkeypatch.setattr(subprocess, "call", check_call_mock)
s = self.storage_class(str(tmpdir), ".txt", post_hook=None)
await s.upload(Item("UID:a/b/c"))
@pytest.mark.asyncio
async def test_post_hook_active(self, tmpdir, monkeypatch):
calls = []
exe = "foo"
def check_call_mock(call, *args, **kwargs):
calls.append(True)
assert len(call) == 2
assert call[0] == exe
monkeypatch.setattr(subprocess, "call", check_call_mock)
s = self.storage_class(str(tmpdir), ".txt", post_hook=exe)
await s.upload(Item("UID:a/b/c"))
assert calls
@pytest.mark.asyncio
async def test_ignore_git_dirs(self, tmpdir):
tmpdir.mkdir(".git").mkdir("foo")
tmpdir.mkdir("a")
tmpdir.mkdir("b")
expected = {"a", "b"}
actual = {
c["collection"] async for c in self.storage_class.discover(str(tmpdir))
}
assert actual == expected
|
mhcflurry/select_allele_specific_models_command.py | ignatovmg/mhcflurry | 113 | 14615 | """
Model select class1 single allele models.
"""
import argparse
import os
import signal
import sys
import time
import traceback
import random
from functools import partial
from pprint import pprint
import numpy
import pandas
from scipy.stats import kendalltau, percentileofscore, pearsonr
from sklearn.metrics import roc_auc_score
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
from .class1_affinity_predictor import Class1AffinityPredictor
from .common import normalize_allele_name
from .encodable_sequences import EncodableSequences
from .common import configure_logging, random_peptides
from .local_parallelism import worker_pool_with_gpu_assignments_from_args, add_local_parallelism_args
from .regression_target import from_ic50
# To avoid pickling large matrices to send to child processes when running in
# parallel, we use this global variable as a place to store data. Data that is
# stored here before creating the thread pool will be inherited to the child
# processes upon fork() call, allowing us to share large data with the workers
# via shared memory.
GLOBAL_DATA = {}
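# Editor's note: run() below fills GLOBAL_DATA (args, the input predictor, the
# per-allele selectors) before the worker pool is created, and model_select()
# reads it back through its constant_data argument, which is passed explicitly
# in the parallel path and defaults to this module-level global in the serial
# path.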
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--data",
metavar="FILE.csv",
required=False,
help=(
"Model selection data CSV. Expected columns: "
"allele, peptide, measurement_value"))
parser.add_argument(
"--exclude-data",
metavar="FILE.csv",
required=False,
help=(
"Data to EXCLUDE from model selection. Useful to specify the original "
"training data used"))
parser.add_argument(
"--models-dir",
metavar="DIR",
required=True,
help="Directory to read models")
parser.add_argument(
"--out-models-dir",
metavar="DIR",
required=True,
help="Directory to write selected models")
parser.add_argument(
"--out-unselected-predictions",
metavar="FILE.csv",
help="Write predictions for validation data using unselected predictor to "
"FILE.csv")
parser.add_argument(
"--unselected-accuracy-scorer",
metavar="SCORER",
default="combined:mass-spec,mse")
parser.add_argument(
"--unselected-accuracy-scorer-num-samples",
type=int,
default=1000)
parser.add_argument(
"--unselected-accuracy-percentile-threshold",
type=float,
metavar="X",
default=95)
parser.add_argument(
"--allele",
default=None,
nargs="+",
help="Alleles to select models for. If not specified, all alleles with "
"enough measurements will be used.")
parser.add_argument(
"--combined-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using combined selector")
parser.add_argument(
"--combined-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using combined selector")
parser.add_argument(
"--combined-min-contribution-percent",
type=float,
default=1.0,
metavar="X",
help="Use only model selectors that can contribute at least X %% to the "
"total score. Default: %(default)s")
parser.add_argument(
"--mass-spec-min-measurements",
type=int,
metavar="N",
default=1,
help="Min number of measurements required for an allele to use mass-spec model "
"selection")
parser.add_argument(
"--mass-spec-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using mass-spec selector")
parser.add_argument(
"--mass-spec-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using mass-spec selector")
parser.add_argument(
"--mse-min-measurements",
type=int,
metavar="N",
default=1,
help="Min number of measurements required for an allele to use MSE model "
"selection")
parser.add_argument(
"--mse-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using MSE selector")
parser.add_argument(
"--mse-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using MSE selector")
parser.add_argument(
"--scoring",
nargs="+",
default=["mse", "consensus"],
help="Scoring procedures to use in order")
parser.add_argument(
"--consensus-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using consensus selector")
parser.add_argument(
"--consensus-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using consensus selector")
parser.add_argument(
"--consensus-num-peptides-per-length",
type=int,
default=10000,
help="Num peptides per length to use for consensus scoring")
parser.add_argument(
"--mass-spec-regex",
metavar="REGEX",
default="mass[- ]spec",
help="Regular expression for mass-spec data. Runs on measurement_source col."
"Default: %(default)s.")
parser.add_argument(
"--verbosity",
type=int,
help="Keras verbosity. Default: %(default)s",
default=0)
add_local_parallelism_args(parser)
def run(argv=sys.argv[1:]):
global GLOBAL_DATA
# On sigusr1 print stack trace
print("To show stack trace, run:\nkill -s USR1 %d" % os.getpid())
signal.signal(signal.SIGUSR1, lambda sig, frame: traceback.print_stack())
args = parser.parse_args(argv)
args.out_models_dir = os.path.abspath(args.out_models_dir)
configure_logging(verbose=args.verbosity > 1)
input_predictor = Class1AffinityPredictor.load(args.models_dir)
print("Loaded: %s" % input_predictor)
if args.allele:
alleles = [normalize_allele_name(a) for a in args.allele]
else:
alleles = input_predictor.supported_alleles
metadata_dfs = {}
if args.data:
df = pandas.read_csv(args.data)
print("Loaded data: %s" % (str(df.shape)))
df = df.loc[
(df.peptide.str.len() >= 8) & (df.peptide.str.len() <= 15)
]
print("Subselected to 8-15mers: %s" % (str(df.shape)))
# Allele names in data are assumed to be already normalized.
df = df.loc[df.allele.isin(alleles)].dropna()
print("Selected %d alleles: %s" % (len(alleles), ' '.join(alleles)))
if args.exclude_data:
exclude_df = pandas.read_csv(args.exclude_data)
metadata_dfs["model_selection_exclude"] = exclude_df
print("Loaded exclude data: %s" % (str(df.shape)))
df["_key"] = df.allele + "__" + df.peptide
exclude_df["_key"] = exclude_df.allele + "__" + exclude_df.peptide
df["_excluded"] = df._key.isin(exclude_df._key.unique())
print("Excluding measurements per allele (counts): ")
print(df.groupby("allele")._excluded.sum())
print("Excluding measurements per allele (fractions): ")
print(df.groupby("allele")._excluded.mean())
df = df.loc[~df._excluded]
del df["_excluded"]
del df["_key"]
print("Reduced data to: %s" % (str(df.shape)))
metadata_dfs["model_selection_data"] = df
df["mass_spec"] = df.measurement_source.str.contains(
args.mass_spec_regex)
else:
df = None
if args.out_unselected_predictions:
df["unselected_prediction"] = input_predictor.predict(
alleles=df.allele.values,
peptides=df.peptide.values)
df.to_csv(args.out_unselected_predictions)
print("Wrote: %s" % args.out_unselected_predictions)
selectors = {}
selector_to_model_selection_kwargs = {}
def make_selector(
scoring,
combined_min_contribution_percent=args.combined_min_contribution_percent):
if scoring in selectors:
return (
selectors[scoring], selector_to_model_selection_kwargs[scoring])
start = time.time()
if scoring.startswith("combined:"):
model_selection_kwargs = {
'min_models': args.combined_min_models,
'max_models': args.combined_max_models,
}
component_selectors = []
for component_selector in scoring.split(":", 1)[1].split(","):
component_selectors.append(
make_selector(
component_selector)[0])
selector = CombinedModelSelector(
component_selectors,
min_contribution_percent=combined_min_contribution_percent)
elif scoring == "mse":
model_selection_kwargs = {
'min_models': args.mse_min_models,
'max_models': args.mse_max_models,
}
min_measurements = args.mse_min_measurements
selector = MSEModelSelector(
df=df.loc[~df.mass_spec],
predictor=input_predictor,
min_measurements=min_measurements)
elif scoring == "mass-spec":
mass_spec_df = df.loc[df.mass_spec]
model_selection_kwargs = {
'min_models': args.mass_spec_min_models,
'max_models': args.mass_spec_max_models,
}
min_measurements = args.mass_spec_min_measurements
selector = MassSpecModelSelector(
df=mass_spec_df,
predictor=input_predictor,
min_measurements=min_measurements)
elif scoring == "consensus":
model_selection_kwargs = {
'min_models': args.consensus_min_models,
'max_models': args.consensus_max_models,
}
selector = ConsensusModelSelector(
predictor=input_predictor,
num_peptides_per_length=args.consensus_num_peptides_per_length)
else:
raise ValueError("Unsupported scoring method: %s" % scoring)
print("Instantiated model selector %s in %0.2f sec." % (
scoring, time.time() - start))
return (selector, model_selection_kwargs)
for scoring in args.scoring:
(selector, model_selection_kwargs) = make_selector(scoring)
selectors[scoring] = selector
selector_to_model_selection_kwargs[scoring] = model_selection_kwargs
unselected_accuracy_scorer = None
if args.unselected_accuracy_scorer:
# Force running all selectors by setting combined_min_contribution_percent=0.
unselected_accuracy_scorer = make_selector(
args.unselected_accuracy_scorer,
combined_min_contribution_percent=0.0)[0]
print("Using unselected accuracy scorer: %s" % unselected_accuracy_scorer)
GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer
print("Selectors for alleles:")
allele_to_selector = {}
allele_to_model_selection_kwargs = {}
for allele in alleles:
selector = None
for possible_selector in args.scoring:
if selectors[possible_selector].usable_for_allele(allele=allele):
selector = selectors[possible_selector]
print("%20s %s" % (allele, selector.plan_summary(allele)))
break
if selector is None:
raise ValueError("No selectors usable for allele: %s" % allele)
allele_to_selector[allele] = selector
allele_to_model_selection_kwargs[allele] = (
selector_to_model_selection_kwargs[possible_selector])
GLOBAL_DATA["args"] = args
GLOBAL_DATA["input_predictor"] = input_predictor
GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer
GLOBAL_DATA["allele_to_selector"] = allele_to_selector
GLOBAL_DATA["allele_to_model_selection_kwargs"] = allele_to_model_selection_kwargs
if not os.path.exists(args.out_models_dir):
print("Attempting to create directory: %s" % args.out_models_dir)
os.mkdir(args.out_models_dir)
print("Done.")
result_predictor = Class1AffinityPredictor(metadata_dataframes=metadata_dfs)
worker_pool = worker_pool_with_gpu_assignments_from_args(args)
start = time.time()
if worker_pool is None:
# Serial run
print("Running in serial.")
results = (
model_select(allele) for allele in alleles)
else:
# Parallel run
random.shuffle(alleles)
results = worker_pool.imap_unordered(
partial(model_select, constant_data=GLOBAL_DATA),
alleles,
chunksize=1)
unselected_summary = []
model_selection_dfs = []
for result in tqdm.tqdm(results, total=len(alleles)):
pprint(result)
summary_dict = dict(result)
summary_dict["retained"] = result["selected"] is not None
del summary_dict["selected"]
unselected_summary.append(summary_dict)
if result['selected'] is not None:
model_selection_dfs.append(
result['selected'].metadata_dataframes['model_selection'])
result_predictor.merge_in_place([result['selected']])
if model_selection_dfs:
model_selection_df = pandas.concat(
model_selection_dfs, ignore_index=True)
model_selection_df["selector"] = model_selection_df.allele.map(
allele_to_selector)
result_predictor.metadata_dataframes["model_selection"] = (
model_selection_df)
result_predictor.metadata_dataframes["unselected_summary"] = (
pandas.DataFrame(unselected_summary))
print("Done model selecting for %d alleles." % len(alleles))
result_predictor.save(args.out_models_dir)
model_selection_time = time.time() - start
if worker_pool:
worker_pool.close()
worker_pool.join()
print("Model selection time %0.2f min." % (model_selection_time / 60.0))
print("Predictor written to: %s" % args.out_models_dir)
class ScrambledPredictor(object):
def __init__(self, predictor):
self.predictor = predictor
self._predictions = {}
self._allele = None
def predict(self, peptides, allele):
if peptides not in self._predictions:
self._predictions[peptides] = pandas.Series(
self.predictor.predict(peptides=peptides, allele=allele))
self._allele = allele
assert allele == self._allele
return self._predictions[peptides].sample(frac=1.0).values
def model_select(allele, constant_data=GLOBAL_DATA):
unselected_accuracy_scorer = constant_data["unselected_accuracy_scorer"]
selector = constant_data["allele_to_selector"][allele]
model_selection_kwargs = constant_data[
"allele_to_model_selection_kwargs"
][allele]
predictor = constant_data["input_predictor"]
args = constant_data["args"]
unselected_accuracy_scorer_samples = constant_data["args"].unselected_accuracy_scorer_num_samples
result_dict = {
"allele": allele
}
unselected_score = None
unselected_score_percentile = None
unselected_score_scrambled_mean = None
if unselected_accuracy_scorer:
unselected_score_function = (
unselected_accuracy_scorer.score_function(allele))
additional_metadata = {}
unselected_score = unselected_score_function(
predictor, additional_metadata_out=additional_metadata)
scrambled_predictor = ScrambledPredictor(predictor)
scrambled_scores = numpy.array([
unselected_score_function(
scrambled_predictor)
for _ in range(unselected_accuracy_scorer_samples)
])
unselected_score_scrambled_mean = scrambled_scores.mean()
unselected_score_percentile = percentileofscore(
scrambled_scores, unselected_score)
print(
"Unselected score and percentile",
allele,
unselected_score,
unselected_score_percentile,
additional_metadata)
result_dict.update(
dict(("unselected_%s" % key, value)
for (key, value)
in additional_metadata.items()))
selected = None
threshold = args.unselected_accuracy_percentile_threshold
if unselected_score_percentile is None or unselected_score_percentile >= threshold:
selected = predictor.model_select(
score_function=selector.score_function(allele=allele),
alleles=[allele],
**model_selection_kwargs)
result_dict["unselected_score_plan"] = (
unselected_accuracy_scorer.plan_summary(allele)
if unselected_accuracy_scorer else None)
result_dict["selector_score_plan"] = selector.plan_summary(allele)
result_dict["unselected_accuracy_score_percentile"] = unselected_score_percentile
result_dict["unselected_score"] = unselected_score
result_dict["unselected_score_scrambled_mean"] = unselected_score_scrambled_mean
result_dict["selected"] = selected
result_dict["num_models"] = len(selected.neural_networks) if selected else None
return result_dict
def cache_encoding(predictor, peptides):
# Encode the peptides for each neural network, so the encoding
# becomes cached.
for network in predictor.neural_networks:
network.peptides_to_network_input(peptides)
class ScoreFunction(object):
"""
Thin wrapper over a score function (Class1AffinityPredictor -> float).
Used to keep a summary string associated with the function.
"""
def __init__(self, function, summary=None):
self.function = function
self.summary = summary if summary else "(n/a)"
def __call__(self, *args, **kwargs):
return self.function(*args, **kwargs)
class CombinedModelSelector(object):
"""
Model selector that computes a weighted average over other model selectors.
"""
def __init__(self, model_selectors, weights=None, min_contribution_percent=1.0):
if weights is None:
weights = numpy.ones(shape=(len(model_selectors),))
self.model_selectors = model_selectors
self.selector_to_weight = dict(zip(self.model_selectors, weights))
self.min_contribution_percent = min_contribution_percent
def usable_for_allele(self, allele):
return any(
selector.usable_for_allele(allele)
for selector in self.model_selectors)
def plan_summary(self, allele):
return self.score_function(allele, dry_run=True).summary
def score_function(self, allele, dry_run=False):
selector_to_max_weighted_score = {}
for selector in self.model_selectors:
weight = self.selector_to_weight[selector]
if selector.usable_for_allele(allele):
max_weighted_score = selector.max_absolute_value(allele) * weight
else:
max_weighted_score = 0
selector_to_max_weighted_score[selector] = max_weighted_score
max_total_score = sum(selector_to_max_weighted_score.values())
        # Use only selectors whose maximum weighted score exceeds
        # min_contribution_percent of the total score.
selectors_to_use = [
selector
for selector in self.model_selectors
if (
selector_to_max_weighted_score[selector] >
max_total_score * self.min_contribution_percent / 100.0)
]
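        # Editor's note, worked example with hypothetical numbers: with equal
        # weights, a mass-spec selector spanning 500 hits and an MSE selector
        # spanning 50 measurements give max_total_score = 550; any selector
        # whose weighted maximum is at or below 1% of that (5.5 by default)
        # is left out of selectors_to_use.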
summary = ", ".join([
"%s(|%.3f|)" % (
selector.plan_summary(allele),
selector_to_max_weighted_score[selector])
for selector in selectors_to_use
])
if dry_run:
score = None
else:
score_functions_and_weights = [
(selector.score_function(allele=allele),
self.selector_to_weight[selector])
for selector in selectors_to_use
]
def score(predictor, additional_metadata_out=None):
scores = numpy.array([
score_function(
predictor,
additional_metadata_out=additional_metadata_out) * weight
for (score_function, weight) in score_functions_and_weights
])
if additional_metadata_out is not None:
additional_metadata_out["combined_score_terms"] = str(
list(scores))
return scores.sum()
return ScoreFunction(score, summary=summary)
class ConsensusModelSelector(object):
"""
Model selector that scores sub-ensembles based on their Kendall tau
consistency with the full ensemble over a set of random peptides.
"""
def __init__(
self,
predictor,
num_peptides_per_length=10000,
multiply_score_by_value=10.0):
(min_length, max_length) = predictor.supported_peptide_lengths
peptides = []
for length in range(min_length, max_length + 1):
peptides.extend(
random_peptides(num_peptides_per_length, length=length))
self.peptides = EncodableSequences.create(peptides)
self.predictor = predictor
self.multiply_score_by_value = multiply_score_by_value
cache_encoding(self.predictor, self.peptides)
def usable_for_allele(self, allele):
return True
def max_absolute_value(self, allele):
return self.multiply_score_by_value
def plan_summary(self, allele):
return "consensus (%d points)" % len(self.peptides)
def score_function(self, allele):
full_ensemble_predictions = self.predictor.predict(
allele=allele,
peptides=self.peptides)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
tau = kendalltau(predictions, full_ensemble_predictions).correlation
if additional_metadata_out is not None:
additional_metadata_out["score_consensus_tau"] = tau
return tau * self.multiply_score_by_value
return ScoreFunction(
score, summary=self.plan_summary(allele))
class MSEModelSelector(object):
"""
Model selector that uses mean-squared error to score models. Inequalities
are supported.
"""
def __init__(
self,
df,
predictor,
min_measurements=1,
multiply_score_by_data_size=True):
self.df = df
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
def usable_for_allele(self, allele):
return (self.df.allele == allele).sum() >= self.min_measurements
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return (self.df.allele == allele).sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
sub_df = self.df.loc[self.df.allele == allele].reset_index(drop=True)
peptides = EncodableSequences.create(sub_df.peptide.values)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=peptides,
)
deviations = from_ic50(predictions) - from_ic50(
sub_df.measurement_value)
if 'measurement_inequality' in sub_df.columns:
# Must reverse meaning of inequality since we are working with
# transformed 0-1 values, which are anti-correlated with the ic50s.
# The measurement_inequality column is given in terms of ic50s.
deviations.loc[
(
(sub_df.measurement_inequality == "<") & (deviations > 0)) |
((sub_df.measurement_inequality == ">") & (deviations < 0))
] = 0.0
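                # Editor's note: for example, a "<" measurement means the true
                # IC50 is below the reported value, so the true 0-1 target is
                # at least from_ic50(measurement_value); a positive deviation
                # (over-prediction in 0-1 space) is consistent with that and
                # is therefore not penalized.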
score_mse = (1 - (deviations ** 2).mean())
if additional_metadata_out is not None:
additional_metadata_out["score_MSE"] = 1 - score_mse
# We additionally include other scores on (=) measurements as
# a convenience
eq_df = sub_df
if 'measurement_inequality' in sub_df.columns:
eq_df = sub_df.loc[
sub_df.measurement_inequality == "="
]
additional_metadata_out["score_pearsonr"] = (
pearsonr(
numpy.log(eq_df.measurement_value.values),
numpy.log(predictions[eq_df.index.values]))[0])
for threshold in [500, 5000, 15000]:
if (eq_df.measurement_value < threshold).nunique() == 2:
additional_metadata_out["score_AUC@%d" % threshold] = (
roc_auc_score(
(eq_df.measurement_value < threshold).values,
-1 * predictions[eq_df.index.values]))
return score_mse * (
len(sub_df) if self.multiply_score_by_data_size else 1)
summary = "mse (%d points)" % (len(sub_df))
return ScoreFunction(score, summary=summary)
class MassSpecModelSelector(object):
"""
    Model selector that uses the positive predictive value (PPV) for
    distinguishing mass-spec hits from decoys.
"""
def __init__(
self,
df,
predictor,
decoys_per_length=0,
min_measurements=100,
multiply_score_by_data_size=True):
# Index is peptide, columns are alleles
hit_matrix = df.groupby(
["peptide", "allele"]).measurement_value.count().unstack().fillna(
0).astype(bool)
if decoys_per_length:
(min_length, max_length) = predictor.supported_peptide_lengths
decoys = []
for length in range(min_length, max_length + 1):
decoys.extend(
random_peptides(decoys_per_length, length=length))
decoy_matrix = pandas.DataFrame(
index=decoys, columns=hit_matrix.columns, dtype=bool)
decoy_matrix[:] = False
full_matrix = pandas.concat([hit_matrix, decoy_matrix])
else:
full_matrix = hit_matrix
if len(full_matrix) > 0:
full_matrix = full_matrix.sample(frac=1.0).astype(float)
self.df = full_matrix
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
self.peptides = EncodableSequences.create(full_matrix.index.values)
cache_encoding(self.predictor, self.peptides)
@staticmethod
def ppv(y_true, predictions):
df = pandas.DataFrame({"prediction": predictions, "y_true": y_true})
return df.sort_values("prediction", ascending=True)[
: int(y_true.sum())
].y_true.mean()
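        # Editor's note, worked example: predictions are IC50-like values
        # where lower means a stronger predicted binder, so sorting ascending
        # and keeping the top int(y_true.sum()) rows selects the predicted
        # positives; with 3 hits among 10 peptides, if 2 of the 3 lowest
        # predictions are true hits the PPV is 2/3.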
def usable_for_allele(self, allele):
return allele in self.df.columns and (
self.df[allele].sum() >= self.min_measurements)
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return self.df[allele].sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
total_hits = self.df[allele].sum()
total_decoys = (self.df[allele] == 0).sum()
multiplier = total_hits if self.multiply_score_by_data_size else 1
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
ppv = self.ppv(self.df[allele], predictions)
if additional_metadata_out is not None:
additional_metadata_out["score_mass_spec_PPV"] = ppv
# We additionally compute AUC score.
additional_metadata_out["score_mass_spec_AUC"] = roc_auc_score(
self.df[allele].values, -1 * predictions)
return ppv * multiplier
summary = "mass-spec (%d hits / %d decoys)" % (total_hits, total_decoys)
return ScoreFunction(score, summary=summary)
if __name__ == '__main__':
run()
|
CircuitPython_Made_Easy_On_CPX/cpx_temperature_neopixels.py | joewalk102/Adafruit_Learning_System_Guides | 665 | 14616 | import time
from adafruit_circuitplayground.express import cpx
import simpleio
cpx.pixels.auto_write = False
cpx.pixels.brightness = 0.3
# Set these based on your ambient temperature for best results!
minimum_temp = 24
maximum_temp = 30
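# Editor's note: map_range() linearly rescales the reading, so a temperature
# of 27 C (halfway between 24 and 30) maps to a peak of about 5, lighting
# pixels 0 through 5 in the loop below.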
while True:
# temperature value remapped to pixel position
peak = simpleio.map_range(cpx.temperature, minimum_temp, maximum_temp, 0, 10)
print(cpx.temperature)
print(int(peak))
for i in range(0, 10, 1):
if i <= peak:
cpx.pixels[i] = (0, 255, 255)
else:
cpx.pixels[i] = (0, 0, 0)
cpx.pixels.show()
time.sleep(0.05)
|
opendbc/generator/test_generator.py | darknight111/openpilot3 | 116 | 14619 | <gh_stars>100-1000
#!/usr/bin/env python3
import os
import filecmp
import tempfile
from opendbc.generator.generator import create_all, opendbc_root
def test_generator():
with tempfile.TemporaryDirectory() as d:
create_all(d)
ignore = [f for f in os.listdir(opendbc_root) if not f.endswith('_generated.dbc')]
comp = filecmp.dircmp(opendbc_root, d, ignore=ignore)
assert len(comp.diff_files) == 0, f"Different files: {comp.diff_files}"
test_generator()
|
mayan/apps/rest_api/classes.py | atitaya1412/Mayan-EDMS | 336 | 14621 | <filename>mayan/apps/rest_api/classes.py
from collections import namedtuple
import io
import json
from furl import furl
from django.core.handlers.wsgi import WSGIRequest
from django.http.request import QueryDict
from django.template import Variable, VariableDoesNotExist
from django.test.client import MULTIPART_CONTENT
from django.urls import resolve
from django.urls.exceptions import Resolver404
from mayan.apps.organizations.settings import setting_organization_url_base_path
from mayan.apps.templating.classes import Template
from .literals import API_VERSION
class BatchResponse:
def __init__(self, name, status_code, data, headers):
self.name = name
self.status_code = status_code
self.data = data
self.headers = headers
class NestableLazyIterator:
def __init__(
self, iterable_string, context, context_list_index, parent_iterator=None
):
self.iterable_string = iterable_string
self.context = context
self.context_list_index = context_list_index
self.parent_iterator = parent_iterator
self.items = None
self.index = 0
def __iter__(self):
return self
def __next__(self):
# Setup the initial values on the initial access.
if not self.items:
if self.parent_iterator:
next(self.parent_iterator)
self.update_iterable_object()
if self.index == len(self.items):
self.index = 0
if self.parent_iterator:
next(self.parent_iterator)
else:
raise StopIteration
self.update_iterable_object()
value = self.items[self.index]
self.context['iterables'][self.context_list_index] = value
self.index += 1
return value
def update_iterable_object(self):
self.items = Variable(var=self.iterable_string).resolve(context=self.context)
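    # Editor's note: chained NestableLazyIterators behave like nested loops:
    # the innermost iterator is advanced on every step, and whenever it wraps
    # around it advances its parent and re-resolves its iterable_string
    # against the shared context, so iterables produced by earlier batch
    # requests are picked up lazily.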
RenderedContent = namedtuple(
typename='RenderedContent', field_names=(
'body', 'include', 'method', 'name', 'url'
)
)
class BatchRequest:
def __init__(
self, collection, name, url, body=None, group_name=None,
include='true', iterables=None, method='GET'
):
self.collection = collection
self.body = body or {}
self.include = include
self.group_name = group_name
self.iterables = iterables
self.method = method
self.name = name
self.url = url
def execute(self):
if self.iterables:
# Initialize the iterables list to allow using any index.
self.collection.context['iterables'] = [None] * len(self.iterables)
iterator = None
for iterable_index, iterable in enumerate(self.iterables):
iterator = NestableLazyIterator(
context=self.collection.context,
context_list_index=iterable_index,
iterable_string=iterable, parent_iterator=iterator
)
while True:
try:
next(iterator)
except StopIteration:
break
except VariableDoesNotExist as exception:
self.collection.responses[self.name] = {
'data': {'error': str(exception)},
'include': 'true',
'is_response': True
}
return
else:
rendered_content = self.render_content()
BatchRequest(
collection=self.collection,
body=rendered_content.body,
group_name=self.group_name,
include=rendered_content.include,
method=rendered_content.method,
name=rendered_content.name,
url=rendered_content.url
).execute()
else:
rendered_content = self.render_content()
url_parts = furl(rendered_content.url)
try:
resolver_match = resolve(path=url_parts.pathstr)
except Resolver404 as exception:
self.collection.responses[rendered_content.name] = {
'data': {
'error': '"{}" not found'.format(exception.args[0]['path'])
},
'include': 'true',
'is_response': True,
'status_code': 404
}
return
else:
environ = getattr(
self.collection.view_request, 'environ', {}
).copy()
environ['REQUEST_METHOD'] = rendered_content.method
environ['PATH_INFO'] = self.url
environ['QUERY_STRING'] = url_parts.querystr
post_query_dict = QueryDict(mutable=True)
post_query_dict.update(rendered_content.body)
json_body = json.dumps(post_query_dict)
request_data = json_body.encode('utf-8')
environ['wsgi.input'] = io.BytesIO(request_data)
environ['CONTENT_LENGTH'] = str(len(request_data))
if rendered_content.method == 'POST':
environ['CONTENT_TYPE'] = MULTIPART_CONTENT
else:
environ['CONTENT_TYPE'] = 'application/octet-stream'
request = WSGIRequest(environ=environ)
request.LANGUAGE_CODE = getattr(
self.collection.view_request, 'LANGUAGE_CODE', None
)
request.POST = post_query_dict
request._read_started = True
request.auth = getattr(
self.collection.view_request, 'auth', None
)
request.csrf_processing_done = True
request.session = getattr(
self.collection.view_request, 'session', None
)
request.user = getattr(
self.collection.view_request, 'user', None
)
response = resolver_match.func(
request=request, **resolver_match.kwargs
)
result = {
'data': response.data,
'headers': {key: value for key, value in response.items()},
'include': rendered_content.include,
'is_response': True,
'status_code': response.status_code
}
self.collection.context[rendered_content.name] = result
self.collection.responses[rendered_content.name] = result
if self.group_name:
self.collection.context.setdefault('groups', {})
self.collection.context['groups'].setdefault(
self.group_name, []
)
self.collection.context['groups'][self.group_name].append(
result
)
def render_content(self):
rendered_body = {}
for key, value in self.body.items():
rendered_key = Template(template_string=key).render(
context=self.collection.context
)
rendered_value = Template(template_string=value).render(
context=self.collection.context
)
rendered_body[rendered_key] = rendered_value
rendered_include = Template(template_string=self.include).render(
context=self.collection.context
)
rendered_method = Template(template_string=self.method).render(
context=self.collection.context
)
rendered_name = Template(template_string=self.name).render(
context=self.collection.context
)
rendered_url = Template(template_string=self.url).render(
context=self.collection.context
)
return RenderedContent(
body=rendered_body, include=rendered_include,
method=rendered_method, name=rendered_name, url=rendered_url
)
class BatchRequestCollection:
def __init__(self, request_list=None):
self.requests = []
        for request_index, request_dict in enumerate(request_list or []):
request_dict.update(
{'collection': self}
)
try:
self.requests.append(BatchRequest(**request_dict))
except Exception as exception:
raise ValueError(
'Error instantiating request #{}; {}'.format(
request_index, exception
)
) from exception
def execute(self, view_request):
self.context = {'view_request': view_request}
self.responses = {}
self.view_request = view_request
for request in self.requests:
request.execute()
# Convert responses in context into response class instances.
result = []
for key, value in self.responses.items():
if json.loads(s=value.get('include', 'true')):
result.append(
BatchResponse(
name=key,
status_code=value.get('status_code', 0),
data=value.get('data', {}),
headers=value.get('headers', {}),
)
)
return result
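# Editor's note: a minimal sketch of how a BatchRequestCollection might be driven.
# The endpoint paths, the ``results`` key and the template variables below are
# illustrative assumptions about the surrounding API, not guaranteed routes; the
# keys of each dict simply mirror the keyword arguments of BatchRequest.__init__.
#
#   batch = BatchRequestCollection(request_list=[
#       {
#           'name': 'document_list',
#           'url': '/api/v4/documents/',
#           'method': 'GET'
#       },
#       {
#           # Iterate over the previous response; each item is exposed to the
#           # templates as ``iterables.0``.
#           'name': 'document_{{ iterables.0.id }}',
#           'url': '/api/v4/documents/{{ iterables.0.id }}/',
#           'method': 'GET',
#           'iterables': ['document_list.data.results']
#       }
#   ])
#   responses = batch.execute(view_request=request)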
class Endpoint:
def __init__(self, label, viewname=None, kwargs=None):
self.label = label
self.kwargs = kwargs
if viewname:
self.viewname = viewname
else:
installation_base_url = setting_organization_url_base_path.value
if installation_base_url:
installation_base_url = '/{}'.format(installation_base_url)
else:
installation_base_url = ''
self.url = '{}/api/v{}/{}/'.format(
installation_base_url, API_VERSION, self.label
)
try:
self.viewname = resolve(path=self.url).view_name
except Resolver404:
self.viewname = None
|
pxr/base/tf/testenv/testTfStringUtils.py | DougRogers-DigitalFish/USD | 3,680 | 14637 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Tf
import logging
import unittest
class TestStringUtils(unittest.TestCase):
"""
    Test Tf String Utils (the Python-wrapped port of the utility functions).
"""
def setUp(self):
self.log = logging.getLogger()
def test_StringSplit(self):
"""Testing StringSplit() function. This function is supposed to behave
like the split method on python string objects."""
self.log.info("Testing string split cases")
self.assertEqual([], Tf.StringSplit("",""))
self.assertEqual([], Tf.StringSplit("abcd",""))
self.assertEqual([], Tf.StringSplit("","ccc"))
s = "abcd"
self.assertEqual(s.split("a"), Tf.StringSplit(s, "a"))
self.assertEqual(s.split("b"), Tf.StringSplit(s, "b"))
self.assertEqual(s.split("c"), Tf.StringSplit(s, "c"))
self.assertEqual(s.split("d"), Tf.StringSplit(s, "d"))
self.assertEqual(s.split("abcd"), Tf.StringSplit(s, "abcd"))
self.assertEqual(s.split("ab"), Tf.StringSplit(s, "ab"))
s = "a:+b:+c:+d"
self.assertEqual(s.split(":+"), Tf.StringSplit(s, ":+"))
s = "a:+b:+c:d"
self.assertEqual(s.split(":+"), Tf.StringSplit(s, ":+"))
def test_Unicode(self):
"""Testing that we can pass python unicode objects to wrapped
functions expecting std::string"""
self.log.info("Testing unicode calls")
self.assertEqual(Tf.StringSplit('123', '2'), ['1', '3'])
self.assertEqual(Tf.StringSplit('123', u'2'), ['1', '3'])
self.assertEqual(Tf.StringSplit(u'123', '2'), ['1', '3'])
self.assertEqual(Tf.StringSplit(u'123', u'2'), ['1', '3'])
self.assertEqual(Tf.DictionaryStrcmp('apple', 'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp('apple', u'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp(u'apple', 'banana'), -1)
self.assertEqual(Tf.DictionaryStrcmp(u'apple', u'banana'), -1)
def test_StringToLong(self):
def checks(val):
self.assertEqual(Tf.StringToLong(repr(val)), val)
def checku(val):
self.assertEqual(Tf.StringToULong(repr(val)), val)
# A range of valid values.
for i in range(1000000):
checku(i)
for i in range(-500000, 500000):
checks(i)
# A wider range of valid values.
for i in range(0, 1000000000, 9337):
checks(i)
for i in range(-500000000, 500000000, 9337):
checks(i)
# Get the max/min values.
ulmax, lmax, lmin = (
Tf._GetULongMax(), Tf._GetLongMax(), Tf._GetLongMin())
# Check the extrema and one before to ensure they work.
for n in [ulmax-1, ulmax]:
checku(n)
for n in [lmin, lmin+1, lmax-1, lmax]:
checks(n)
# Check that some beyond the extrema over/underflow.
#
# Unsigned overflow.
for i in range(1, 1000):
with self.assertRaises(ValueError):
checku(ulmax + i)
with self.assertRaises(ValueError):
checks(lmax + i)
with self.assertRaises(ValueError):
checks(lmin - i)
def test_Identifiers(self):
self.assertFalse(Tf.IsValidIdentifier(''))
self.assertTrue(Tf.IsValidIdentifier('hello9'))
self.assertFalse(Tf.IsValidIdentifier('9hello'))
self.assertTrue(Tf.IsValidIdentifier('hello_world'))
self.assertTrue(Tf.IsValidIdentifier('HELLO_WORLD'))
self.assertTrue(Tf.IsValidIdentifier('hello_world_1234'))
self.assertFalse(Tf.IsValidIdentifier('hello_#world#_1234'))
self.assertFalse(Tf.IsValidIdentifier('h e l l o'))
self.assertEqual(Tf.MakeValidIdentifier(''), '_')
self.assertEqual(Tf.MakeValidIdentifier('hello9'), 'hello9')
self.assertEqual(Tf.MakeValidIdentifier('9hello'), '_hello')
self.assertEqual(
Tf.MakeValidIdentifier('hello_#world#_1234'), 'hello__world__1234')
        self.assertEqual(Tf.MakeValidIdentifier('h e l l o'), 'h_e_l_l_o')
        self.assertEqual(Tf.MakeValidIdentifier('!@#$%'), '_____')
if __name__ == '__main__':
unittest.main()
|
envelopes/envelope.py | siyaoyao/envelopes | 202 | 14647 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
envelopes.envelope
==================
This module contains the Envelope class.
"""
import sys
if sys.version_info[0] == 2:
from email import Encoders as email_encoders
elif sys.version_info[0] == 3:
from email import encoders as email_encoders
basestring = str
def unicode(_str, _charset):
return str(_str.encode(_charset), _charset)
else:
raise RuntimeError('Unsupported Python version: %d.%d.%d' % (
sys.version_info[0], sys.version_info[1], sys.version_info[2]
))
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
import os
import re
from .conn import SMTP
from .compat import encoded
class MessageEncodeError(Exception):
pass
class Envelope(object):
"""
The Envelope class.
**Address formats**
The following formats are supported for e-mail addresses:
* ``"<EMAIL>"`` - just the e-mail address part as a string,
* ``"Some User <<EMAIL>>"`` - name and e-mail address parts as a string,
* ``("<EMAIL>", "Some User")`` - e-mail address and name parts as a tuple.
Whenever you come to manipulate addresses feel free to use any (or all) of
the formats above.
:param to_addr: ``To`` address or list of ``To`` addresses
:param from_addr: ``From`` address
:param subject: message subject
:param html_body: optional HTML part of the message
:param text_body: optional plain text part of the message
:param cc_addr: optional single CC address or list of CC addresses
:param bcc_addr: optional single BCC address or list of BCC addresses
:param headers: optional dictionary of headers
:param charset: message charset
"""
ADDR_FORMAT = '%s <%s>'
ADDR_REGEXP = re.compile(r'^(.*) <([^@]+@[^@]+)>$')
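    # Editor's note: a minimal usage sketch, not part of the original class. The
    # addresses and SMTP keyword arguments below are made-up example values; the
    # ``send()`` keywords are simply forwarded to the SMTP connection wrapper.
    #
    #   envelope = Envelope(
    #       from_addr=(u'from@example.com', u'From Example'),
    #       to_addr=(u'to@example.com', u'To Example'),
    #       subject=u'Envelopes demo',
    #       text_body=u'Hello!'
    #   )
    #   envelope.add_cc_addr(u'Some User <cc@example.com>')  # "Name <address>" string form
    #   envelope.add_bcc_addr(u'bcc@example.com')            # bare address string form
    #   envelope.send('smtp.example.com', login=u'user', password=u'secret', tls=True)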
def __init__(self, to_addr=None, from_addr=None, subject=None,
html_body=None, text_body=None, cc_addr=None, bcc_addr=None,
headers=None, charset='utf-8'):
if to_addr:
if isinstance(to_addr, list):
self._to = to_addr
else:
self._to = [to_addr]
else:
self._to = []
self._from = from_addr
self._subject = subject
self._parts = []
if text_body:
self._parts.append(('text/plain', text_body, charset))
if html_body:
self._parts.append(('text/html', html_body, charset))
if cc_addr:
if isinstance(cc_addr, list):
self._cc = cc_addr
else:
self._cc = [cc_addr]
else:
self._cc = []
if bcc_addr:
if isinstance(bcc_addr, list):
self._bcc = bcc_addr
else:
self._bcc = [bcc_addr]
else:
self._bcc = []
if headers:
self._headers = headers
else:
self._headers = {}
self._charset = charset
self._addr_format = unicode(self.ADDR_FORMAT, charset)
def __repr__(self):
return u'<Envelope from="%s" to="%s" subject="%s">' % (
self._addrs_to_header([self._from]),
self._addrs_to_header(self._to),
self._subject
)
@property
def to_addr(self):
"""List of ``To`` addresses."""
return self._to
def add_to_addr(self, to_addr):
"""Adds a ``To`` address."""
self._to.append(to_addr)
def clear_to_addr(self):
"""Clears list of ``To`` addresses."""
self._to = []
@property
def from_addr(self):
return self._from
@from_addr.setter
def from_addr(self, from_addr):
self._from = from_addr
@property
def cc_addr(self):
"""List of CC addresses."""
return self._cc
def add_cc_addr(self, cc_addr):
"""Adds a CC address."""
self._cc.append(cc_addr)
def clear_cc_addr(self):
"""Clears list of CC addresses."""
self._cc = []
@property
def bcc_addr(self):
"""List of BCC addresses."""
return self._bcc
def add_bcc_addr(self, bcc_addr):
"""Adds a BCC address."""
self._bcc.append(bcc_addr)
def clear_bcc_addr(self):
"""Clears list of BCC addresses."""
self._bcc = []
@property
def charset(self):
"""Message charset."""
return self._charset
@charset.setter
def charset(self, charset):
self._charset = charset
self._addr_format = unicode(self.ADDR_FORMAT, charset)
def _addr_tuple_to_addr(self, addr_tuple):
addr = ''
if len(addr_tuple) == 2 and addr_tuple[1]:
addr = self._addr_format % (
self._header(addr_tuple[1] or ''),
addr_tuple[0] or ''
)
elif addr_tuple[0]:
addr = addr_tuple[0]
return addr
@property
def headers(self):
"""Dictionary of custom headers."""
return self._headers
def add_header(self, key, value):
"""Adds a custom header."""
self._headers[key] = value
def clear_headers(self):
"""Clears custom headers."""
self._headers = {}
def _addrs_to_header(self, addrs):
_addrs = []
for addr in addrs:
if not addr:
continue
if isinstance(addr, basestring):
if self._is_ascii(addr):
_addrs.append(self._encoded(addr))
else:
# these headers need special care when encoding, see:
# http://tools.ietf.org/html/rfc2047#section-8
# Need to break apart the name from the address if there are
# non-ascii chars
m = self.ADDR_REGEXP.match(addr)
if m:
t = (m.group(2), m.group(1))
_addrs.append(self._addr_tuple_to_addr(t))
else:
# What can we do? Just pass along what the user gave us and hope they did it right
_addrs.append(self._encoded(addr))
elif isinstance(addr, tuple):
_addrs.append(self._addr_tuple_to_addr(addr))
else:
self._raise(MessageEncodeError,
'%s is not a valid address' % str(addr))
_header = ','.join(_addrs)
return _header
def _raise(self, exc_class, message):
raise exc_class(self._encoded(message))
def _header(self, _str):
if self._is_ascii(_str):
return _str
return Header(_str, self._charset).encode()
def _is_ascii(self, _str):
return all(ord(c) < 128 for c in _str)
def _encoded(self, _str):
return encoded(_str, self._charset)
def to_mime_message(self):
"""Returns the envelope as
:py:class:`email.mime.multipart.MIMEMultipart`."""
msg = MIMEMultipart('alternative')
msg['Subject'] = self._header(self._subject or '')
msg['From'] = self._encoded(self._addrs_to_header([self._from]))
msg['To'] = self._encoded(self._addrs_to_header(self._to))
if self._cc:
msg['CC'] = self._addrs_to_header(self._cc)
if self._headers:
for key, value in self._headers.items():
msg[key] = self._header(value)
for part in self._parts:
type_maj, type_min = part[0].split('/')
if type_maj == 'text' and type_min in ('html', 'plain'):
msg.attach(MIMEText(part[1], type_min, self._charset))
else:
msg.attach(part[1])
return msg
def add_attachment(self, file_path, mimetype=None):
"""Attaches a file located at *file_path* to the envelope. If
*mimetype* is not specified an attempt to guess it is made. If nothing
is guessed then `application/octet-stream` is used."""
if not mimetype:
mimetype, _ = mimetypes.guess_type(file_path)
if mimetype is None:
mimetype = 'application/octet-stream'
type_maj, type_min = mimetype.split('/')
with open(file_path, 'rb') as fh:
part_data = fh.read()
part = MIMEBase(type_maj, type_min)
part.set_payload(part_data)
email_encoders.encode_base64(part)
part_filename = os.path.basename(self._encoded(file_path))
part.add_header('Content-Disposition', 'attachment; filename="%s"'
% part_filename)
self._parts.append((mimetype, part))
def send(self, *args, **kwargs):
"""Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns."""
conn = SMTP(*args, **kwargs)
send_result = conn.send(self)
return conn, send_result
|
test/test_format.py | GuyTuval/msgpack-python | 1,252 | 14661 | <reponame>GuyTuval/msgpack-python
#!/usr/bin/env python
# coding: utf-8
from msgpack import unpackb
def check(src, should, use_list=0, raw=True):
assert unpackb(src, use_list=use_list, raw=raw, strict_map_key=False) == should
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0, 64, 127), (-32, -16, -1)))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None,),)))
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def"))
def testFixMap():
check(
b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80", {False: {None: None}, True: {None: {}}}
)
def testUnsignedInt():
check(
b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0, 128, 255, 0, 32768, 65535, 0, 2147483648, 4294967295),
)
def testSignedInt():
check(
b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff",
(0, -128, -1, 0, -32768, -1, 0, -2147483648, -1),
)
def testRaw():
check(
b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"),
)
check(
b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
("", "a", "ab", "", "a", "ab"),
raw=False,
)
def testArray():
check(
b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3",
((), (None,), (False, True), (), (None,), (False, True)),
)
def testMap():
check(
b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2",
(
{},
{None: False},
{True: False, None: False},
{},
{None: False},
{True: False, None: False},
),
)
|
test/scons-time/time/no-result.py | moroten/scons | 1,403 | 14682 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the time subcommand's --which option doesn't fail, and prints
an appropriate error message, if a log file doesn't have its specific
requested results.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
header = """\
set key bottom left
plot '-' title "Startup" with lines lt 1
# Startup
"""
footer = """\
e
"""
line_fmt = "%s 11.123456\n"
lines = []
for i in range(9):
logfile_name = 'foo-%s-0.log' % i
if i == 5:
test.write(test.workpath(logfile_name), "NO RESULTS HERE!\n")
else:
test.fake_logfile(logfile_name)
lines.append(line_fmt % i)
expect = [header] + lines + [footer]
stderr = "file 'foo-5-0.log' has no results!\n"
test.run(arguments = 'time --fmt gnuplot --which total foo*.log',
stdout = ''.join(expect),
stderr = stderr)
expect = [header] + [footer]
test.run(arguments = 'time --fmt gnuplot foo-5-0.log',
stdout = ''.join(expect),
stderr = stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
foo/pictureR/wordsTemplate.py | MangetsuC/arkHelper | 147 | 14724 | <filename>foo/pictureR/wordsTemplate.py<gh_stars>100-1000
from PIL import Image, ImageDraw, ImageFont
from numpy import asarray
from cv2 import cvtColor, COLOR_RGB2BGR, imshow, waitKey
from os import getcwd
def getFontSize_name(resolution):
x = resolution[0]
if x <= 1024:
return (16, (1024,576))
elif x <= 1280:
return (21, (1280,720))
elif x <= 1440:
return (23, (1440,810))
elif x <= 1600:
return (26, (1600,900))
else:
return (31, (1920,1080))
def getTemplatePic_CH(words, fontsize):
    # Typical font sizes: infrastructure operator name 23, stationing overview room name 28 (at 1440*810); operator name 30, room name 38 (at 1920*1080)
    ttf = ImageFont.truetype(getcwd() + "/res/fonts/SourceHanSansCN-Regular.otf", fontsize) # use the Source Han Sans font
wordsPic = Image.new('RGB', ttf.getsize(words))
wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), words, font=ttf, fill=(255,255,255)) # draw the text to build the matching template
#temp = cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
#imshow('test', temp)
#waitKey(0)
return cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
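# Editor's note: a sketch of how the rendered template could be consumed. The
# generated image is a BGR ndarray, so it can be matched against a screenshot with
# OpenCV; the room name, the ``screenshot_bgr`` variable and the use of
# cv2.matchTemplate are illustrative assumptions, not part of this module.
#
#   from cv2 import matchTemplate, minMaxLoc, TM_CCOEFF_NORMED
#   fontsize = getFontSize_name((1920, 1080))[0]
#   template = getTemplatePic_CH('宿舍', fontsize)
#   result = matchTemplate(screenshot_bgr, template, TM_CCOEFF_NORMED)
#   _, max_val, _, max_loc = minMaxLoc(result)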
def getTemplatePic_NUM(num, fontsize):
    # Typical font size: operator morale in the stationing overview is 28
num = str(num)
    ttf = ImageFont.truetype(getcwd() + "/res/fonts/Bender.otf", fontsize) # use the Bender font
wordsPic = Image.new('RGB', ttf.getsize(num), color = (255, 255, 255))
wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), num, font=ttf, fill=(0,0,0)) # draw the number to build the matching template
return cvtColor(asarray(wordsPic), COLOR_RGB2BGR) |
tools/harness/tests/freemem.py | lambdaxymox/barrelfish | 111 | 14737 | ##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import tests
from common import TestCommon
from results import PassFailResult
@tests.add_test
class MemTest(TestCommon):
'''prints out free and total memory after system boot up'''
name = "freemem"
def get_modules(self, build, machine):
modules = super(MemTest, self).get_modules(build, machine)
modules.add_module("freemem")
return modules
def get_finish_string(self):
return "freemem done!"
def process_data(self, testdir, rawiter):
# the test passed iff the last line is the finish string
lastline = ''
for line in rawiter:
lastline = line
passed = lastline.startswith(self.get_finish_string())
return PassFailResult(passed)
|
src/reader/_plugins/enclosure_tags.py | mirekdlugosz/reader | 205 | 14750 | """
enclosure_tags
~~~~~~~~~~~~~~
Fix tags for MP3 enclosures (e.g. podcasts).
Adds a "with tags" link to a version of the file with tags set as follows:
* the entry title as title
* the feed title as album
* the entry/feed author as author
This plugin needs additional dependencies; use the ``unstable-plugins`` extra
to install them:
.. code-block:: bash
pip install reader[unstable-plugins]
To load::
READER_APP_PLUGIN='reader._plugins.enclosure_tags:init' \\
python -m reader serve
Implemented for https://github.com/lemon24/reader/issues/50.
Became a plugin in https://github.com/lemon24/reader/issues/52.
"""
import tempfile
from urllib.parse import urlparse
import mutagen.mp3
import requests
from flask import Blueprint
from flask import request
from flask import Response
from flask import stream_with_context
from flask import url_for
blueprint = Blueprint('enclosure_tags', __name__)
ALL_TAGS = ('album', 'title', 'artist')
SET_ONLY_IF_MISSING_TAGS = {'artist'}
@blueprint.route('/enclosure-tags', defaults={'filename': None})
@blueprint.route('/enclosure-tags/<filename>')
def enclosure_tags(filename):
def update_tags(file):
emp3 = mutagen.mp3.EasyMP3(file)
changed = False
for key in ALL_TAGS:
if key in SET_ONLY_IF_MISSING_TAGS and emp3.get(key):
continue
value = request.args.get(key)
if not value:
continue
emp3[key] = [value]
changed = True
if changed:
emp3.save(file)
file.seek(0)
def chunks(req):
# Send the headers as soon as possible.
# Some browsers wait for the headers before showing the "Save As" dialog.
yield ''
tmp = tempfile.TemporaryFile()
for chunk in req.iter_content(chunk_size=2 ** 20):
tmp.write(chunk)
tmp.seek(0)
update_tags(tmp)
try:
while True:
data = tmp.read(2 ** 20)
if not data:
break
yield data
finally:
tmp.close()
url = request.args['url']
req = requests.get(url, stream=True)
headers = {}
for name in ('Content-Type', 'Content-Disposition'):
if name in req.headers:
headers[name] = req.headers[name]
return Response(stream_with_context(chunks(req)), headers=headers)
def enclosure_tags_filter(enclosure, entry):
filename = urlparse(enclosure.href).path.split('/')[-1]
if not filename.endswith('.mp3'):
return []
args = {'url': enclosure.href, 'filename': filename}
if entry.title:
args['title'] = entry.title
if entry.feed.title:
args['album'] = entry.feed.title
if entry.author or entry.feed.author:
args['artist'] = entry.author or entry.feed.author
return [('with tags', url_for('enclosure_tags.enclosure_tags', **args))]
def init(app):
app.register_blueprint(blueprint)
app.reader_additional_enclosure_links.append(enclosure_tags_filter)
|
gw_full_latest/CloudTTN.py | rendikanyut/LowCostLoRaGw | 654 | 14763 | <gh_stars>100-1000
#-------------------------------------------------------------------------------
# Part of this Python script is taken from the Pycom NanoGateway
# https://github.com/pycom/pycom-libraries/tree/master/examples/lorawan-nano-gateway
#
# Adapted by <EMAIL>
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import binascii
import json
import os
import socket
import time
import datetime
from dateutil import parser
import calendar
import sys
#don't generate pyc (no compilation of imported module) so change in key_* file can be done dynamically
sys.dont_write_bytecode = True
import key_TTN as key_LoRaWAN
netserv='TTN'
try:
key_LoRaWAN.source_list
except AttributeError:
key_LoRaWAN.source_list=[]
try:
key_LoRaWAN.lorawan_server
except AttributeError:
key_LoRaWAN.lorawan_server="router.eu.thethings.network"
try:
key_LoRaWAN.lorawan_port
except AttributeError:
key_LoRaWAN.lorawan_port=1700
PROTOCOL_VERSION = 2
PUSH_DATA = 0
PUSH_ACK = 1
PULL_DATA = 2
PULL_ACK = 4
PULL_RESP = 3
RX_PK = {
'rxpk': [{
'time': '',
'tmst': 0,
'chan': 0,
'rfch': 0,
'freq': 0,
'stat': 1,
'modu': 'LORA',
'datr': '',
'codr': '4/5',
'rssi': 0,
'lsnr': 0,
'size': 0,
'data': ''
}]
}
TX_ACK_PK = {
'txpk_ack': {
'error': ''
}
}
class LoRaWAN:
def __init__(self, id, frequency, bw, sf, server, port):
self.id = id
self.frequency = frequency
self.sf = sf
self.bw = bw
self.server = server
self.port = port
self.server_ip = None
self.sock = None
def start(self):
self._log('Cloud%s: gw id: {}' % netserv, self.id)
# get the server IP and create an UDP socket
try:
self.server_ip = socket.getaddrinfo(self.server, self.port)[0][-1]
self._log('Cloud%s: Opening UDP socket to {} ({}) port {}...' % netserv, self.server, self.server_ip[0], self.server_ip[1])
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(False)
except Exception as ex:
self._log('Cloud%s: Failed to connect to server: {}' % netserv, ex)
def _sf_bw_to_dr(self, sf, bw):
dr = 'SF' + str(sf)
if bw == 125:
return dr + 'BW125'
elif bw == 250:
return dr + 'BW250'
else:
return dr + 'BW500'
def rx_packet(self, ldata, datalen, tdata, tmst, rssi, snr):
RX_PK["rxpk"][0]["time"] = tdata
if tmst=='':
#in seconds, maybe we should put it in microsecond?
RX_PK["rxpk"][0]["tmst"] = calendar.timegm(time.gmtime())
else:
RX_PK["rxpk"][0]["tmst"] = int(tmst)
RX_PK["rxpk"][0]["freq"] = self.frequency
RX_PK["rxpk"][0]["datr"] = self._sf_bw_to_dr(self.sf, self.bw)
RX_PK["rxpk"][0]["rssi"] = rssi
RX_PK["rxpk"][0]["lsnr"] = snr
RX_PK["rxpk"][0]["data"] = ldata
RX_PK["rxpk"][0]["size"] = datalen
#packet = self._make_node_packet(rx_data, tdata, 0, self.sf, self.bw, rssi, snr)
packet=json.dumps(RX_PK)
self._push_data(packet)
self._log('Cloud%s: Received packet: {}' % netserv, packet)
def _push_data(self, data):
token = os.urandom(2)
packet = bytearray([PROTOCOL_VERSION]) + token + bytearray([PUSH_DATA]) + binascii.unhexlify(self.id) + data
#print ''.join('{:02x}'.format(x) for x in packet)
#self._log('Cloud%s: Try to forward packet: {}' % netserv, packet)
try:
self.sock.sendto(packet, self.server_ip)
self.sock.close()
except Exception as ex:
self._log('Cloud%s: Failed to push uplink packet to server: {}' % netserv, ex)
def _log(self, message, *args):
print('{}'.format(str(message).format(*args)))
# Testing with pau_lorawan_testing/Pau_testing_device 0x26011721
#
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "2019-03-25T18:46:00.528+01:00" "0000B827EBD1B236"
# or
# python CloudTTN.py "QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==" "256,64,637605665,0,28,8,-45" "125,5,12,868100" "`date +%FT%T%z`" "0000B827EBD1B236"
#
# get the base64 encrypted data from `Arduino_LoRa_temp` sending "Hello from UPPA"
#
# Hello from UPPA
# plain payload hex
# 48 65 6C 6C 6F 20 66 72 6F 6D 20 55 50 50 41
# Encrypting
# encrypted payload
# 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58
# calculate MIC with NwkSKey
# transmitted LoRaWAN-like packet:
# MHDR[1] | DevAddr[4] | FCtrl[1] | FCnt[2] | FPort[1] | EncryptedPayload | MIC[4]
# 40 21 17 01 26 00 00 00 01 84 21 84 D4 BF 3B 34 20 CC 93 48 CB 6B A8 58 5E 6D 1E FA
# [base64 LoRaWAN HEADER+CIPHER+MIC]:QCEXASYAAAABhCGE1L87NCDMk0jLa6hYXm0e+g==
def main(ldata, pdata, rdata, tdata, gwid):
# this is common code to process packet information provided by the main gateway script (i.e. post_processing_gw.py)
# these information are provided in case you need them
arr = map(int,pdata.split(','))
dst=arr[0]
ptype=arr[1]
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
#if lora packet is received with an SX1301 concentrator, then the packet-formatter will pass the tmst field after the date information, separated by '*'
#i.e. "2019-03-25T18:46:00.528+01:00*29641444"
tmst=tdata.count('*')
if (tmst != 0):
tdata_tmp=tdata.split('*')[0]
tmst=tdata.split('*')[1]
tdata=tdata_tmp
else:
tmst=''
#from 2019-05-14T14:53:10.241191+02:00 (similar to command date +%FT%T.%6N%z)
#to 2019-05-14T14:53:10.241191Z (similar to command date +%FT%T.%6NZ)
dt = parser.parse(tdata)
#in case you want to remove microsecond
#tdata = dt.replace(microsecond=0,tzinfo=None).isoformat()+"Z"
tdata = dt.replace(tzinfo=None).isoformat()+"Z"
arr = map(int,rdata.split(','))
rbw=arr[0]
rcr=arr[1]
rsf=arr[2]
rfq=arr[3]/1000.0
#LoRaWAN packet
if dst==256:
src_str="0x%0.8X" % src
#we force to BW125 as TTN is can not handle other bandwidth right now, for instance those of Lora 2.4GHz
#TODO: change when TTN will support LoRa 2.4GHz
rbw=125
else:
src_str=str(src)
if (src_str in key_LoRaWAN.source_list) or (len(key_LoRaWAN.source_list)==0):
#build the ttn_gwid which is defined to be gwid[4:10]+"FFFF"+gwid[10:]
#gwid is normally defined as eth0 MAC address filled by 0 in front: 0000B827EBD1B236
ttn_gwid=gwid[4:10]+"FFFF"+gwid[10:]
ttn = LoRaWAN(
id=ttn_gwid,
frequency=rfq,
bw=rbw,
sf=rsf,
server=key_LoRaWAN.lorawan_server,
port=key_LoRaWAN.lorawan_port)
ttn.start()
ttn.rx_packet(ldata, datalen, tdata, tmst, RSSI, SNR)
else:
        print "Source is not in source list, not sending to %s" % netserv
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/Workset.py | htlcnn/ironpython-stubs | 182 | 14772 | class Workset(WorksetPreview,IDisposable):
""" Represents a workset in the document. """
@staticmethod
def Create(document,name):
"""
Create(document: Document,name: str) -> Workset
Creates a new workset.
document: The document in which the new instance is created.
name: The workset name.
Returns: Returns the newly created workset.
"""
pass
def Dispose(self):
""" Dispose(self: WorksetPreview,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: WorksetPreview,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsEditable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is editable.
Get: IsEditable(self: Workset) -> bool
"""
IsOpen=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is open (rather than closed).
Get: IsOpen(self: Workset) -> bool
"""
IsVisibleByDefault=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is visible by default.
Get: IsVisibleByDefault(self: Workset) -> bool
"""
Kind=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Kind of the workset.
Get: Kind(self: Workset) -> WorksetKind
"""
|
tf_quant_finance/datetime/constants.py | slowy07/tf-quant-finance | 3,138 | 14794 | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Date-related constants and enums."""
import enum
class Month(enum.Enum):
"""Months. Values are one-based."""
JANUARY = 1
  FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
class WeekDay(enum.Enum):
"""Named days of the week. Values are zero-based with Monday = 0."""
# We follow Python datetime convention of starting from 0.
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
class PeriodType(enum.Enum):
"""Periods that can be added or subtracted from DateTensors."""
DAY = 0
WEEK = 1
MONTH = 2
YEAR = 3
class BusinessDayConvention(enum.Enum):
"""Conventions that determine how to roll dates that fall on holidays.
* `NONE`: No adjustment
* `FOLLOWING`: Choose the first business day after the given holiday.
* `MODIFIED_FOLLOWING`: Choose the first business day after the given holiday
unless that day falls in the next calendar month, in which case choose the
first business day before the holiday.
* `PRECEDING`: Choose the first business day before the given holiday.
* `MODIFIED_PRECEDING`: Choose the first business day before the given holiday
unless that day falls in the previous calendar month, in which case choose the
first business day after the holiday.
"""
NONE = 0
FOLLOWING = 1
MODIFIED_FOLLOWING = 2
PRECEDING = 3
MODIFIED_PRECEDING = 4
# TODO(b/148011715): add NEAREST convention.
class WeekendMask(object):
"""Provides weekend masks for some of the common weekend patterns."""
# E.g. US/UK/Europe etc.
SATURDAY_SUNDAY = (0, 0, 0, 0, 0, 1, 1)
# E.g. Most countries in the Middle East.
FRIDAY_SATURDAY = (0, 0, 0, 0, 1, 1, 0)
# E.g. India, Nepal.
SUNDAY_ONLY = (0, 0, 0, 0, 0, 0, 1)
# Default value.
NONE = (0, 0, 0, 0, 0, 0, 0)
|
generate_trajectories.py | keuntaeklee/pytorch-PPUU | 159 | 14795 | import argparse, pdb
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
kwargs = dict(
display=opt.display,
state_image=opt.state_image,
store=opt.store,
fps=opt.fps,
nb_lanes=opt.lanes,
traffic_rate=opt.traffic_rate,
data_dir=opt.data_dir,
delta_t=opt.delta_t,
)
register(
id='Traffic-v0',
entry_point='traffic_gym:Simulator',
kwargs=kwargs
)
register(
id='I-80-v0',
entry_point='map_i80:I80',
kwargs=kwargs
)
gym.envs.registration.register(
id='US-101-v0',
entry_point='map_us101:US101',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Lankershim-v0',
entry_point='map_lanker:Lankershim',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Peachtree-v0',
entry_point='map_peach:Peachtree',
kwargs=kwargs,
)
env_names = {
'ai': 'Traffic-v0',
'i80': 'I-80-v0',
'us101': 'US-101-v0',
'lanker': 'Lankershim-v0',
'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
done = False
while not done:
observation, reward, done, info = env.step()
env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
|
curriculum/experiments/goals/point_nd/goal_point_nd_trpo.py | coco-robotics/rllab-curriculum | 115 | 14807 | <filename>curriculum/experiments/goals/point_nd/goal_point_nd_trpo.py
from curriculum.utils import set_env_no_gpu, format_experiment_prefix
set_env_no_gpu()
import argparse
import math
import os
import os.path as osp
import sys
import random
from multiprocessing import cpu_count
import numpy as np
import tensorflow as tf
from rllab.misc.instrument import run_experiment_lite
from rllab import config
from rllab.misc.instrument import VariantGenerator
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from curriculum.envs.ndim_point.point_env import PointEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from curriculum.envs.goal_env import GoalExplorationEnv, evaluate_goal_env
from curriculum.envs.base import FixedStateGenerator, UniformStateGenerator
from curriculum.state.evaluator import *
from curriculum.logging.html_report import format_dict, HTMLReport
from curriculum.logging.visualization import *
from curriculum.logging.logger import ExperimentLogger
from curriculum.experiments.goals.point_nd.utils import plot_policy_performance
EXPERIMENT_TYPE = osp.basename(__file__).split('.')[0]
def run_task(v):
random.seed(v['seed'])
np.random.seed(v['seed'])
# goal generators
logger.log("Initializing the goal generators and the inner env...")
inner_env = normalize(PointEnv(dim=v['goal_size'], state_bounds=v['state_bounds']))
print("the state_bounds are: ", v['state_bounds'])
center = np.zeros(v['goal_size'])
uniform_goal_generator = UniformStateGenerator(state_size=v['goal_size'], bounds=v['goal_range'],
center=center)
feasible_goal_ub = np.array(v['state_bounds'])[:v['goal_size']]
print("the feasible_goal_ub is: ", feasible_goal_ub)
uniform_feasible_goal_generator = UniformStateGenerator(state_size=v['goal_size'], bounds=[-1 * feasible_goal_ub,
feasible_goal_ub])
env = GoalExplorationEnv(
env=inner_env, goal_generator=uniform_goal_generator,
obs2goal_transform=lambda x: x[:int(len(x) / 2)],
terminal_eps=v['terminal_eps'],
only_feasible=v['only_feasible'],
distance_metric=v['distance_metric'],
terminate_env=True, goal_weight=v['goal_weight'],
) # this goal_generator will be updated by a uniform after
if v['sample_unif_feas']:
env.update_goal_generator(uniform_feasible_goal_generator)
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32),
# Fix the variance since different goals will require different variances, making this parameter hard to learn.
learn_std=False,
output_gain=v['output_gain'],
init_std=v['policy_init_std'],
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_traj = 3
# feasible_goals = generate_initial_goals(env, policy, v['goal_range'], horizon=v['horizon'], size=10000) #v['horizon'])
# print(feasible_goals)
# uniform_list_goal_generator = UniformListStateGenerator(goal_list=feasible_goals.tolist())
# env.update_goal_generator(uniform_list_goal_generator)
# env.update_goal_generator(fixed_goal_generator)
logger.log("Initializing report and plot_policy_reward...")
log_dir = logger.get_snapshot_dir()
inner_log_dir = osp.join(log_dir, 'inner_iters')
report = HTMLReport(osp.join(log_dir, 'report.html'), images_per_row=3)
report.add_header("{}".format(EXPERIMENT_TYPE))
report.add_text(format_dict(v))
logger.log("Starting the outer iterations")
for outer_iter in range(v['outer_iters']):
logger.log("Outer itr # %i" % outer_iter)
logger.log("Perform TRPO with UniformListStateGenerator...")
with ExperimentLogger(inner_log_dir, outer_iter, snapshot_mode='last', hold_outter_log=True):
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=v['pg_batch_size'],
max_path_length=v['horizon'],
n_itr=v['inner_iters'],
discount=0.995,
step_size=0.01,
plot=False,
)
algo.train()
report.add_image(
plot_policy_performance(policy, env, v['horizon'])
)
        # log some more on how the policy performs on the uniform feasible and uniform goal generators
old_goal_generator = env.goal_generator
logger.log("Evaluating performance on Unif and Fix Goal Gen...")
with logger.tabular_prefix('UnifFeasGoalGen_'):
env.update_goal_generator(uniform_feasible_goal_generator)
evaluate_goal_env(env, policy=policy, horizon=v['horizon'], n_goals=50,
fig_prefix='UnifFeasGoalGen_itr%d' % outer_iter,
report=report, n_traj=n_traj)
# back to old goal generator
with logger.tabular_prefix("UnifGoalGen_"):
env.update_goal_generator(old_goal_generator)
evaluate_goal_env(env, policy=policy, horizon=v['horizon'], n_goals=50,
fig_prefix='UnifGoalGen_itr%d' % outer_iter,
report=report, n_traj=n_traj)
logger.dump_tabular(with_prefix=False)
report.save()
report.new_row()
with logger.tabular_prefix('FINALUnifFeasGoalGen_'):
env.update_goal_generator(uniform_feasible_goal_generator)
evaluate_goal_env(env, policy=policy, horizon=v['horizon'], n_goals=5e3, fig_prefix='FINAL1UnifFeasGoalGen_',
report=report, n_traj=n_traj)
evaluate_goal_env(env, policy=policy, horizon=v['horizon'], n_goals=5e3, fig_prefix='FINAL2UnifFeasGoalGen_',
report=report, n_traj=n_traj)
logger.dump_tabular(with_prefix=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ec2', '-e', action='store_true', default=False, help="add flag to run in ec2")
parser.add_argument('--clone', '-c', action='store_true', default=False,
help="add flag to copy file and checkout current")
parser.add_argument('--local_docker', '-d', action='store_true', default=False,
help="add flag to run in local dock")
parser.add_argument('--type', '-t', type=str, default='', help='set instance type')
parser.add_argument('--price', '-p', type=str, default='', help='set betting price')
parser.add_argument('--subnet', '-sn', type=str, default='', help='set subnet like us-west-1a')
parser.add_argument('--name', '-n', type=str, default='', help='set exp prefix name and new file name')
parser.add_argument('--debug', action='store_true', default=False, help="run code without multiprocessing")
parser.add_argument(
'--prefix', type=str, default=None,
help='set the additional name for experiment prefix'
)
args = parser.parse_args()
# setup ec2
ec2_instance = args.type if args.type else 'm4.4xlarge'
# configure instance
info = config.INSTANCE_TYPE_INFO[ec2_instance]
config.AWS_INSTANCE_TYPE = ec2_instance
config.AWS_SPOT_PRICE = str(info["price"])
n_parallel = int(info["vCPU"]) # make the default 4 if not using ec2
if args.ec2:
mode = 'ec2'
elif args.local_docker:
mode = 'local_docker'
n_parallel = cpu_count() if not args.debug else 1
else:
mode = 'local'
n_parallel = cpu_count() if not args.debug else 1
default_prefix = 'goal-point-nd-trpo'
if args.prefix is None:
exp_prefix = format_experiment_prefix(default_prefix)
elif args.prefix == '':
exp_prefix = default_prefix
else:
exp_prefix = '{}_{}'.format(default_prefix, args.prefix)
vg = VariantGenerator()
vg.add('seed', range(30, 90, 20))
# # GeneratorEnv params
    vg.add('goal_size', [2, 3, 4, 5, 6])  # dimensionality of the N-d point environment's goal space
vg.add('terminal_eps', lambda goal_size: [math.sqrt(goal_size) / math.sqrt(2) * 0.3])
vg.add('only_feasible', [True])
vg.add('goal_range', [5]) # this will be used also as bound of the state_space
vg.add('state_bounds', lambda goal_range, goal_size, terminal_eps:
[(1, goal_range) + (0.3,) * (goal_size - 2) + (goal_range, ) * goal_size])
vg.add('sample_unif_feas', [True])
vg.add('distance_metric', ['L2'])
vg.add('goal_weight', [1])
#############################################
vg.add('min_reward', lambda goal_weight: [goal_weight * 0.1]) # now running it with only the terminal reward of 1!
vg.add('max_reward', lambda goal_weight: [goal_weight * 0.9])
vg.add('horizon', [200])
vg.add('outer_iters', [200])
vg.add('inner_iters', [5])
vg.add('pg_batch_size', [20000])
# policy initialization
vg.add('output_gain', [1])
vg.add('policy_init_std', [1])
print('Running {} inst. on type {}, with price {}, parallel {}'.format(
vg.size, config.AWS_INSTANCE_TYPE,
config.AWS_SPOT_PRICE, n_parallel
))
for vv in vg.variants():
if mode in ['ec2', 'local_docker']:
run_experiment_lite(
# use_cloudpickle=False,
stub_method_call=run_task,
variant=vv,
mode=mode,
# Number of parallel workers for sampling
n_parallel=n_parallel,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
seed=vv['seed'],
# plot=True,
exp_prefix=exp_prefix,
# exp_name=exp_name,
sync_s3_pkl=True,
# for sync the pkl file also during the training
sync_s3_png=True,
sync_s3_html=True,
# # use this ONLY with ec2 or local_docker!!!
pre_commands=[
'export MPLBACKEND=Agg',
'pip install --upgrade pip',
'pip install --upgrade -I tensorflow',
'pip install git+https://github.com/tflearn/tflearn.git',
'pip install dominate',
'pip install multiprocessing_on_dill',
'pip install scikit-image',
'conda install numpy -n rllab3 -y',
],
)
if mode == 'local_docker':
sys.exit()
else:
run_experiment_lite(
# use_cloudpickle=False,
stub_method_call=run_task,
variant=vv,
mode='local',
n_parallel=n_parallel,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
seed=vv['seed'],
exp_prefix=exp_prefix,
print_command=False,
)
if args.debug:
sys.exit()
|
pyNastran/bdf/bdf_interface/encoding.py | ACea15/pyNastran | 293 | 14808 | <gh_stars>100-1000
def decode_lines(lines_bytes, encoding: str):
if isinstance(lines_bytes[0], bytes):
lines_str = [line.decode(encoding) for line in lines_bytes]
elif isinstance(lines_bytes[0], str):
lines_str = lines_bytes
else:
raise TypeError(type(lines_bytes[0]))
return lines_str
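# Editor's note: a small usage sketch with made-up byte strings (the function is
# typically fed raw BDF lines that may arrive as either bytes or str).
#
#   >>> decode_lines([b'GRID,1\n', b'CQUAD4,2\n'], encoding='utf-8')
#   ['GRID,1\n', 'CQUAD4,2\n']
#
# str input passes through unchanged; any other element type raises a TypeError.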
|
generator/src/googleapis/codegen/utilities/json_expander.py | romulobusatto/google-api-php-client-services | 709 | 14831 | #!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for simple JSON templates.
A JSON template is a dictionary of JSON data in which string values
may be simple templates in string.Template format (i.e.,
$dollarSignEscaping). By default, the template is expanded against
its own data, optionally updated with additional context.
"""
import json
from string import Template
import sys
__author__ = '<EMAIL> (<NAME>)'
def ExpandJsonTemplate(json_data, extra_context=None, use_self=True):
"""Recursively template-expand a json dict against itself or other context.
The context for string expansion is the json dict itself by default, updated
by extra_context, if supplied.
Args:
json_data: (dict) A JSON object where string values may be templates.
extra_context: (dict) Additional context for template expansion.
use_self: (bool) Whether to expand the template against itself, or only use
extra_context.
Returns:
A dict where string template values have been expanded against
the context.
"""
if use_self:
context = dict(json_data)
else:
context = {}
if extra_context:
context.update(extra_context)
def RecursiveExpand(obj):
if isinstance(obj, list):
return [RecursiveExpand(x) for x in obj]
elif isinstance(obj, dict):
return dict((k, RecursiveExpand(v)) for k, v in obj.iteritems())
elif isinstance(obj, (str, unicode)):
return Template(obj).safe_substitute(context)
else:
return obj
return RecursiveExpand(json_data)
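# Editor's note: a small sketch of the expansion behaviour, using made-up keys.
#
#   data = {
#       'project': 'demo',
#       'title': 'Docs for $project',
#       'paths': ['$project/readme.md'],
#   }
#   expanded = ExpandJsonTemplate(data, extra_context={'project': 'overridden'})
#   # expanded['title'] == 'Docs for overridden'    (extra_context wins over self)
#   # expanded['paths'] == ['overridden/readme.md']
#   # expanded['project'] == 'demo'                 (non-template values are untouched)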
if __name__ == '__main__':
if len(sys.argv) > 1:
json_in = open(sys.argv[1])
else:
json_in = sys.stdin
data = json.load(json_in)
expanded = ExpandJsonTemplate(data)
json.dump(expanded, sys.stdout, indent=2)
|
haiku/_src/integration/numpy_inputs_test.py | timwillhack/dm-haikuBah2 | 1,647 | 14875 | <reponame>timwillhack/dm-haikuBah2
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests whether modules produce similar output given np.ndarray inputs."""
import functools
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
def tree_assert_allclose(a, b, *, atol=1e-6):
jax.tree_multimap(
functools.partial(np.testing.assert_allclose, atol=atol), a, b)
class NumpyInputsTest(parameterized.TestCase):
@test_utils.combined_named_parameters(
descriptors.ALL_MODULES,
test_utils.named_bools('np_inputs'),
test_utils.named_bools('np_params'),
test_utils.named_bools('close_over_params'))
def test_numpy_and_jax_results_close(
self,
module_fn: ModuleFn,
shape: Tuple[int, ...],
dtype: jnp.dtype,
np_params: bool,
np_inputs: bool,
close_over_params: bool,
):
if not (np_params or np_inputs):
self.skipTest('Pure JAX variants tested elsewhere')
f = hk.transform_with_state(lambda x: module_fn()(x)) # pylint: disable=unnecessary-lambda
rng = jax.random.PRNGKey(42)
x = jnp.ones(shape, dtype)
params, state = f.init(rng, x)
if close_over_params:
apply_fn = functools.partial(f.apply, params, state)
out, new_state = jax.jit(apply_fn)(rng, x)
else:
out, new_state = jax.jit(f.apply)(params, state, rng, x)
if np_inputs:
rng, x = jax.device_get((rng, x))
with self.subTest('init'):
params2, state2 = f.init(rng, x)
tree_assert_allclose(params, params2)
tree_assert_allclose(state, state2)
with self.subTest('apply'):
if np_params:
params, state = jax.device_get((params, state))
if close_over_params:
apply_fn = functools.partial(f.apply, params, state)
out2, new_state2 = jax.jit(apply_fn)(rng, x)
else:
out2, new_state2 = jax.jit(f.apply)(params, state, rng, x)
tree_assert_allclose(out, out2)
tree_assert_allclose(new_state, new_state2)
if __name__ == '__main__':
absltest.main()
|
tests/test_translator.py | Attsun1031/schematics | 1,430 | 14894 | # -*- coding: utf-8 -*-
import pytest
def test_translator():
def translator(string):
translations = {'String value is too long.': 'Tamanho de texto muito grande.'}
return translations.get(string, string)
from schematics.translator import register_translator
register_translator(translator)
from schematics.types import StringType
from schematics.exceptions import ValidationError
with pytest.raises(ValidationError) as exc:
StringType(max_length=1).validate_length('Abc')
assert exc.value == ['Tamanho de texto muito grande.']
|
osf/management/commands/populate_custom_taxonomies.py | gaybro8777/osf.io | 628 | 14909 | import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
    # This is not a problem now, as all excluded subjects are leaves, but it could be problematic if non-leaf subjects had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
logger.warn('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
logger.warn('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in Preprint.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
'\n"exclude": [<list of children to exclude from included trees>],'
                 '\n"custom": {"<Custom Name>": {"parent": "<Parent text>", "bepress": "<Bepress Name>"}, ...},'
                 '\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)>", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
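# Editor's sketch (not part of the original management command): an illustrative
# invocation matching the --data structure described in add_arguments above. The
# provider id and every subject name below are hypothetical.
#
#   python manage.py populate_custom_taxonomies --provider exampleprovider --dry --data '{
#       "include": ["Engineering"],
#       "exclude": ["Aerospace Engineering"],
#       "custom": {"Example Robotics": {"parent": "Engineering", "bepress": "Robotics"}},
#       "merge": {"Mechanical Engineering": "Engineering"}
#   }'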
|
modules/mongodb_atlas/mongodb_atlas.py | riddopic/opta | 595 | 14935 |
import os
from typing import TYPE_CHECKING
from modules.base import ModuleProcessor
from opta.core.terraform import get_terraform_outputs
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class MongodbAtlasProcessor(ModuleProcessor):
def __init__(self, module: "Module", layer: "Layer"):
if module.data["type"] != "mongodb-atlas":
raise Exception(
f"The module {module.name} was expected to be of type mongodb-atlas"
)
super(MongodbAtlasProcessor, self).__init__(module, layer)
def pre_hook(self, module_idx: int) -> None:
required_env_set = set(["MONGODB_ATLAS_PUBLIC_KEY", "MONGODB_ATLAS_PRIVATE_KEY"])
if not required_env_set.issubset(set(os.environ.keys())):
raise UserErrors(
"Opta did not find environment variable(s), please set them and retry: {}".format(
required_env_set - set(os.environ.keys())
)
)
super(MongodbAtlasProcessor, self).pre_hook(module_idx)
def process(self, module_idx: int) -> None:
self.module.data["cloud_provider"] = self.layer.cloud.upper()
if self.module.data["cloud_provider"] == "LOCAL":
self.module.data["cloud_provider"] = "AWS" # For local, always spin up in AWS
self.module.data["region"] = "US_EAST_1"
base_layer = self.layer.root()
root_outputs = get_terraform_outputs(base_layer)
self.module.data["public_nat_ips"] = root_outputs["public_nat_ips"]
super(MongodbAtlasProcessor, self).process(module_idx)
|
plugins/hashsum_download/girder_hashsum_download/settings.py | JKitok/girder | 395 | 14972 | from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Auto-compute hash setting must be true or false.')
|
ccmlib/cluster_factory.py | justinchuch/ccm | 626 | 14974 |
from __future__ import absolute_import
import os
import yaml
from ccmlib import common, extension, repository
from ccmlib.cluster import Cluster
from ccmlib.dse_cluster import DseCluster
from ccmlib.node import Node
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
class ClusterFactory():
@staticmethod
def load(path, name):
cluster_path = os.path.join(path, name)
filename = os.path.join(cluster_path, 'cluster.conf')
with open(filename, 'r') as f:
data = yaml.safe_load(f)
try:
install_dir = None
if 'install_dir' in data:
install_dir = data['install_dir']
repository.validate(install_dir)
if install_dir is None and 'cassandra_dir' in data:
install_dir = data['cassandra_dir']
repository.validate(install_dir)
cassandra_version = None
if 'cassandra_version' in data:
cassandra_version = LooseVersion(data['cassandra_version'])
if common.isDse(install_dir):
cluster = DseCluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
else:
cluster = Cluster(path, data['name'], install_dir=install_dir, create_directory=False, derived_cassandra_version=cassandra_version)
node_list = data['nodes']
seed_list = data['seeds']
if 'partitioner' in data:
cluster.partitioner = data['partitioner']
if 'config_options' in data:
cluster._config_options = data['config_options']
if 'dse_config_options' in data:
cluster._dse_config_options = data['dse_config_options']
if 'misc_config_options' in data:
cluster._misc_config_options = data['misc_config_options']
if 'log_level' in data:
cluster.__log_level = data['log_level']
if 'use_vnodes' in data:
cluster.use_vnodes = data['use_vnodes']
if 'datadirs' in data:
cluster.data_dir_count = int(data['datadirs'])
extension.load_from_cluster_config(cluster, data)
except KeyError as k:
            raise common.LoadError("Error Loading " + filename + ", missing property: " + str(k))
for node_name in node_list:
cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
for seed in seed_list:
cluster.seeds.append(seed)
return cluster
|
causalnex/structure/pytorch/dist_type/_base.py | Rishab26/causalnex | 1,523 | 14975 | # Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``causalnex.pytorch.dist_type._base`` defines the distribution type class interface and default behavior.
"""
import itertools
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
import torch
from causalnex.structure.structuremodel import StructureModel
class DistTypeBase(metaclass=ABCMeta):
"""Base class defining the distribution default behavior and interface"""
def __init__(self, idx: int):
"""
Default constructor for the DistTypeBase class.
Unless overridden, provides default behavior to all subclasses.
Args:
idx: Positional index in data passed to the NOTEARS algorithm
which correspond to this datatype.
"""
self.idx = idx
def get_columns(
self,
X: np.ndarray,
) -> np.ndarray:
"""
Gets the column(s) associated with the instantiated DistType.
Args:
X: Full dataset to be selected from.
Returns:
1d or 2d np.ndarray of columns.
"""
return X[:, self.idx]
# pylint: disable=no-self-use
# pylint: disable=unused-argument
def preprocess_X(self, X: np.ndarray, fit_transform: bool = True) -> np.ndarray:
"""
Overload this method to perform any required preprocessing of the data
matrix. This can include data conversion, column expansion etc.
Changes to the tabu parameters should also be done here.
**WARN** This preprocessing CANNOT reorder the columns of X.
Args:
X: The original passed-in data.
fit_transform: Whether the class first fits
then transforms the data, or just transforms.
Just transforming is used to preprocess new data after the
initial NOTEARS fit.
Returns:
Preprocessed X
"""
return X
# pylint: disable=no-self-use
def preprocess_tabu_edges(
self, tabu_edges: List[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""
Overload this method to perform any required preprocessing of the tabu_edges.
Args:
tabu_edges: The original tabu_edges.
Returns:
Preprocessed tabu_edges.
"""
return tabu_edges
# pylint: disable=no-self-use
def preprocess_tabu_nodes(self, tabu_nodes: List[int]) -> List[int]:
"""
Overload this method to perform any required preprocessing of the tabu_nodes.
Args:
tabu_nodes: The original tabu_nodes.
Returns:
Preprocessed tabu_nodes.
"""
return tabu_nodes
# pylint: disable=no-self-use
def update_idx_col(self, idx_col: Dict[int, str]) -> Dict[int, str]:
"""
Overload this method to update the idx_col dict with expanded colnames.
Args:
idx_col: The original index to column mapping.
Returns:
Updated index to column mapping.
"""
return idx_col
def add_to_node(self, sm: StructureModel) -> StructureModel:
"""
Adds self to a node of a structure model corresponding to self.idx.
Args:
sm: The input StructureModel
Returns:
Updated StructureModel
"""
sm.nodes[self.idx]["dist_type"] = self
return sm
# pylint: disable=no-self-use
def modify_h(self, square_weight_mat: torch.Tensor) -> torch.Tensor:
"""
Overload this method to apply updates to the W matrix in h(W).
Typically used to prevent spurious cycles when using expended columns.
Args:
square_weight_mat: The weight matrix used in h(W).
Returns:
Updated weight matrix used in h(W).
"""
return square_weight_mat
# pylint: disable=no-self-use
def collapse_adj(self, adj: np.ndarray) -> np.ndarray:
"""
Overload this method to apply updates to collapse the W matrix
of a multi-parameter distribution
Likely has the same impact as modify_h.
Args:
adj: The adjacency matrix.
Returns:
Updated adjacency matrix.
"""
return adj
@abstractmethod
def loss(self, X: torch.Tensor, X_hat: torch.Tensor) -> torch.Tensor:
"""
Args:
X: The original data passed into NOTEARS (i.e. the reconstruction target).
X_hat: The reconstructed data.
Returns:
Scalar pytorch tensor of the reconstruction loss between X and X_hat.
"""
raise NotImplementedError("Must implement the loss() method")
@abstractmethod
def inverse_link_function(self, X_hat: torch.Tensor) -> torch.Tensor:
"""
Convert the transformed data from the latent space to the original dtype
using the inverse link function.
Args:
X_hat: Reconstructed data in the latent space.
Returns:
Modified X_hat.
MUST be same shape as passed in data.
Projects the self.idx column from the latent space to the dist_type space.
"""
raise NotImplementedError("Must implement the inverse_link_function() method")
class ExpandColumnsMixin:
"""
Mixin class providing convenience methods for column expansion.
"""
@staticmethod
def _expand_columns(X: np.ndarray, new_columns: np.ndarray) -> np.ndarray:
"""
Expands the data matrix columns without reordering the indices.
Args:
X: Base dataset to expand.
new_columns: The columns to expand the dataset by.
Returns:
Expanded dataset.
"""
return np.hstack([X, new_columns])
@staticmethod
def update_tabu_edges(
idx_group: List[int],
tabu_edges: List[Tuple[int, int]],
tabu_idx_group: bool,
) -> List[Tuple[int, int]]:
"""
Tabu edges are:
1. all user defined connections to original feature column
2. all inter-feature connections (optional)
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_edges: The list of tabu_edges to be updated.
tabu_idx_group: Whether inter-group edges should also be considered tabu.
            I.e. if a column has been expanded, we often want to prevent edges being learned
            between its parameters.
Returns:
Updated tabu_edges
"""
if tabu_edges is None:
tabu_edges = []
# copy to prevent mutations
tabu_edges = deepcopy(tabu_edges)
# handle 1.
new_tabu_edges = []
# for each original tabu pair
for (i, j) in tabu_edges:
# idx_group[0] is the original column index
if i == idx_group[0]:
new_tabu_edges += [(idx, j) for idx in idx_group[1:]]
elif j == idx_group[0]:
new_tabu_edges += [(i, idx) for idx in idx_group[1:]]
# all new edges added to tabu_edges
tabu_edges += new_tabu_edges
# handle 2.
if tabu_idx_group:
# add on all pairwise permutations of particular feature group
# NOTE: permutations are needed for edge directionality
tabu_edges += list(itertools.permutations(idx_group, 2))
return tabu_edges
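    # Editor's sketch (not part of the original module): with column 0 expanded into
    # indices [0, 3, 4], a user-defined tabu edge (0, 1) is expanded as
    #   ExpandColumnsMixin.update_tabu_edges([0, 3, 4], [(0, 1)], False)
    #   -> [(0, 1), (3, 1), (4, 1)]
    # and passing tabu_idx_group=True additionally forbids every directed edge
    # between members of [0, 3, 4].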
@staticmethod
def update_tabu_nodes(
idx_group: List[int], tabu_nodes: List[int]
) -> List[Tuple[int, int]]:
"""
Tabu nodes are:
1. all user defined connections to original feature column
Args:
idx_group: The group of indices which correspond to a single
expanded column.
tabu_nodes: The list of tabu_nodes to be updated.
Returns:
Updated tabu_nodes
"""
if tabu_nodes is None:
return tabu_nodes
# copy to prevent mutations
tabu_nodes = deepcopy(tabu_nodes)
new_tabu_nodes = []
for i in tabu_nodes:
# NOTE: the first element in the idx_group is guaranteed as self.idx
if i == idx_group[0]:
new_tabu_nodes += idx_group[1:]
# add on the new tabu nodes
tabu_nodes += new_tabu_nodes
return tabu_nodes
|
docs/examples/Moving_Platform_Simulation.py | Red-Portal/Stone-Soup-1 | 157 | 15018 | #!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform, and how a defined moving
platform can be exploited as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to setup and configure a simulation environment in order to provide a
# multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6 dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
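#
# For instance (an editor's illustration, not part of the original example), with
# :math:`\triangle k = 1` second the initial state above advances to
# :math:`[0, 0, 50, 50, 8000, 0]^T`: each position component is incremented by its
# paired velocity component, while the velocity components themselves are unchanged.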
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our moving sensor platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non-linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
#           \arctan(\mathcal{y},\mathcal{x}) \\
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * the additive noise term :math:`\dot{\mathbf{x}}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
#           \dot{\mathbf{x}}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
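#
# As a quick numeric illustration (an editor's sketch, not part of the original
# example): for a target offset of :math:`(x, y, z) = (1000, 1000, 100)` metres
# from the sensor, the noise-free measurement is roughly 0.071 rad of elevation
# and :math:`\pi/4` rad (45 degrees) of bearing, as checked below.
_example_x, _example_y, _example_z = 1000., 1000., 100.
_example_elevation = np.arcsin(
    _example_z / np.sqrt(_example_x**2 + _example_y**2 + _example_z**2))  # ~0.0706 rad
_example_bearing = np.arctan2(_example_y, _example_x)  # pi/4 rad, i.e. 45 degrees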
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platform's local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example and which creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependent).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32  # specified in radians per second
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
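# Editor's note (not in the original example): the 45 degree turn quoted above is
# simply turn_rate * duration = (pi/32 rad/s) * 8 s = pi/4 rad = 45 degrees.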
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have build our sensor platform and a target platform we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 24 one-second steps.
times = np.arange(0, 24, 1)  # 24 timestamps, one second apart
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have set up our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater; this is because each sensor attaches its measurement model to
# each detection it generates. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
|
examples/ndfd/ndfd.py | eLBati/pyxb | 123 | 15021 | from raw.ndfd import *
|
backend/kale/tests/assets/kfp_dsl/simple_data_passing.py | brness/kale | 502 | 15035 | import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
|
gaphor/RAAML/stpa/connectors.py | Texopolis/gaphor | 867 | 15054 |
from gaphor.diagram.connectors import Connector
from gaphor.diagram.presentation import Classified
from gaphor.RAAML.raaml import RelevantTo
from gaphor.RAAML.stpa import RelevantToItem
from gaphor.SysML.requirements.connectors import DirectedRelationshipPropertyPathConnect
@Connector.register(Classified, RelevantToItem)
class RelevantToConnect(DirectedRelationshipPropertyPathConnect):
relation_type = RelevantTo
|
python/test/experimental/test_tb_graph_writer.py | daniel-falk/nnabla | 2,792 | 15067 | # Copyright 2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def test_show_graph():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
nn.clear_parameters()
x = nn.Variable((2, 3, 4, 4))
with nn.parameter_scope('c1'):
h = PF.convolution(x, 8, (3, 3), pad=(1, 1))
h = F.relu(PF.batch_normalization(h))
with nn.parameter_scope('f1'):
y = PF.affine(h, 10)
with TBGraphWriter(log_dir='log_out') as tb:
tb.from_variable(y, output_name="y")
def test_show_curve():
try:
from nnabla.experimental.tb_graph_writer import TBGraphWriter
except:
pytest.skip(
'Skip because tensorboardX and tensorflow is not installed.')
with TBGraphWriter(log_dir='log_out') as tb:
values = []
for i in range(360):
s = np.sin(i / 180.0 * np.pi)
tb.add_scalar("show_curve/sin", s, i)
values.append(s)
nd_values = np.array(values)
for i in range(10):
tb.add_histogram("histogram", nd_values, i)
nd_values += 0.05
|
pyperform/tools.py | timgates42/pyperform | 250 | 15080 | <gh_stars>100-1000
__author__ = 'calvin'
import re
import sys
from math import log10
if sys.version[0] == '3':
pass
else:
range = xrange
classdef_regex = re.compile(r"\S*def .*#!|class .*#!")
tagged_line_regex = re.compile(r".*#!")
def convert_time_units(t):
""" Convert time in seconds into reasonable time units. """
if t == 0:
return '0 s'
order = log10(t)
    if order < -6:
time_units = 'ns'
factor = 1000000000
elif -6 <= order < -3:
time_units = 'us'
factor = 1000000
elif -3 <= order < -1:
time_units = 'ms'
factor = 1000.
elif -1 <= order:
time_units = 's'
factor = 1
return "{:.3f} {}".format(factor * t, time_units)
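# Editor's sketch (not part of the original module): representative outputs of
# convert_time_units() as defined above.
#   convert_time_units(2.5e-7)  -> '250.000 ns'
#   convert_time_units(0.004)   -> '4.000 ms'
#   convert_time_units(1.5)     -> '1.500 s'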
def globalize_indentation(src):
""" Strip the indentation level so the code runs in the global scope. """
lines = src.splitlines()
indent = len(lines[0]) - len(lines[0].strip(' '))
func_src = ''
for ii, l in enumerate(src.splitlines()):
line = l[indent:]
func_src += line + '\n'
return func_src
def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src
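# Editor's sketch (not part of the original module): remove_decorators() strips
# decorator lines that mention 'Benchmark', so source text such as
#   "@Benchmark(setup)\ndef f():\n    pass"
# (where 'setup' is a hypothetical argument) becomes
#   "def f():\n    pass"
# multi-line decorator calls are removed line by line until the closing ')'.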
def get_tagged_imports(fp):
imports = []
inside_def = False
def_lines = []
def_indent = 0
with open(fp, 'r') as f:
lastLine = f.readline()
for line in f:
tagged_class_or_def = re.findall(classdef_regex, lastLine)
tagged_line = re.findall(tagged_line_regex, lastLine)
# Find the indentation level of the function/class definition and capture all source code lines
# until we get a line that is the same indentation level (end of function/class definition).
if tagged_class_or_def or inside_def:
if tagged_class_or_def and def_lines:
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
if inside_def:
# For lines within the definition
indent = len(lastLine) - len(lastLine.lstrip(' '))
if indent == def_indent and lastLine != '\n':
imports.append(''.join(def_lines))
def_lines = []
inside_def = False
def_indent = 0
if tagged_line:
imports.append(lastLine)
else:
if lastLine != '\n':
def_lines.append(lastLine)
else:
# For the definition line
inside_def = True
def_indent = len(lastLine) - len(lastLine.lstrip(' '))
def_lines.append(lastLine)
elif tagged_line:
imports.append(lastLine)
lastLine = line
# Examine the last line
tagged_line = re.findall(tagged_line_regex, lastLine)
if inside_def:
def_lines.append(line)
imports.append(''.join(def_lines))
elif tagged_line:
imports.append(line)
src = '\n'.join(imports) + '\n'
return src
def generate_call_statement(func, is_class_method, *args, **kwargs):
# Create the call statement
if is_class_method:
stmt = 'instance.' + func.__name__ + '('
else:
stmt = func.__name__ + '('
for arg in args:
stmt += arg.__repr__() + ', '
for kw, val in kwargs.items():
stmt += '{0}={1}, '.format(kw, val.__repr__())
stmt = stmt.strip(', ')
stmt += ')'
return stmt
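# Editor's sketch (not part of the original module): for a hypothetical function
# foo, generate_call_statement builds the call string used by the benchmark.
#   generate_call_statement(foo, False, 10, b=3)  -> "foo(10, b=3)"
#   generate_call_statement(foo, True, 10, b=3)   -> "instance.foo(10, b=3)"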
def walk_tree(start, attr):
"""
Recursively walk through a tree relationship. This iterates a tree in a top-down approach,
fully reaching the end of a lineage before moving onto the next sibling of that generation.
"""
path = [start]
for child in path:
yield child
idx = path.index(child)
for grandchild in reversed(getattr(child, attr)):
path.insert(idx + 1, grandchild) |
google_or_tools/coloring_ip_sat.py | tias/hakank | 279 | 15101 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.
Inspired by the GLPK:s model color.mod
'''
COLOR, Graph Coloring Problem
Written in GNU MathProg by <NAME> <<EMAIL>>
Given an undirected loopless graph G = (V, E), where V is a set of
nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to
find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set
of colors whose cardinality is as small as possible, such that
F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must
be assigned different colors.
'''
This is a port of my old OR-tools CP solver coloring_ip.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
model = cp.CpModel()
# max number of colors
# [we know that 4 suffices for normal maps]
nc = 5
# number of nodes
n = 11
# set of nodes
V = list(range(n))
num_edges = 20
#
# Neighbours
#
# This data correspond to the instance myciel3.col from:
# http://mat.gsia.cmu.edu/COLOR/instances.html
#
# Note: 1-based (adjusted below)
E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
[3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
[8, 11], [9, 11], [10, 11]]
#
# declare variables
#
# x[i,c] = 1 means that node i is assigned color c
x = {}
for v in V:
for j in range(nc):
x[v, j] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, j))
# u[c] = 1 means that color c is used, i.e. assigned to some node
u = [model.NewIntVar(0, 1, 'u[%i]' % i) for i in range(nc)]
# number of colors used, to minimize
num_colors = model.NewIntVar(0,nc, "num_colors")
model.Add(num_colors == sum(u))
#
# constraints
#
# each node must be assigned exactly one color
for i in V:
model.Add(sum([x[i, c] for c in range(nc)]) == 1)
# adjacent nodes cannot be assigned the same color
# (and adjust to 0-based)
for i in range(num_edges):
for c in range(nc):
model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])
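  # Editor's note (not in the original model): this single constraint both forbids
  # the two endpoints of edge i from sharing colour c and forces u[c] to 1 whenever
  # colour c is assigned to either endpoint.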
# objective
model.Minimize(num_colors)
#
# solution
#
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print()
print('number of colors:', solver.Value(num_colors))
print('colors used:', [solver.Value(u[i]) for i in range(nc)])
print()
for v in V:
print('v%i' % v, ' color ', end=' ')
for c in range(nc):
if solver.Value(x[v, c]) == 1:
print(c)
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
|
tools/find_run_binary.py | pospx/external_skia | 6,304 | 15106 | #!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that finds and runs a binary by looking in the likely locations."""
import os
import subprocess
import sys
def run_command(args):
"""Runs a program from the command line and returns stdout.
Args:
args: Command line to run, as a list of string parameters. args[0] is the
binary to run.
Returns:
stdout from the program, as a single string.
Raises:
Exception: the program exited with a nonzero return code.
"""
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
  if proc.returncode != 0:
raise Exception('command "%s" failed: %s' % (args, stderr))
return stdout
def find_path_to_program(program):
"""Returns path to an existing program binary.
Args:
program: Basename of the program to find (e.g., 'render_pictures').
Returns:
Absolute path to the program binary, as a string.
Raises:
Exception: unable to find the program binary.
"""
trunk_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
possible_paths = [os.path.join(trunk_path, 'out', 'Release', program),
os.path.join(trunk_path, 'out', 'Debug', program),
os.path.join(trunk_path, 'out', 'Release',
program + '.exe'),
os.path.join(trunk_path, 'out', 'Debug',
program + '.exe')]
for try_path in possible_paths:
if os.path.isfile(try_path):
return try_path
raise Exception('cannot find %s in paths %s; maybe you need to '
'build %s?' % (program, possible_paths, program))
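# Editor's sketch (not part of the original script): typical usage, assuming the
# 'render_pictures' tool has already been built into out/Release or out/Debug.
#   binary = find_path_to_program('render_pictures')
#   output = run_command([binary, '--help'])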
|
pml/engineer_tests.py | gatapia/py_ml_utils | 183 | 15129 | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort());
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
|
src/pyrobot/habitat/base.py | cihuang123/pyrobot | 2,150 | 15131 | import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
"""docstring for SimpleBase"""
def __init__(self, configs, simulator):
self.configs = configs
self.sim = simulator.sim
self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
self.transform = None
self.init_state = self.get_full_state()
def execute_action(self, action_name, actuation):
# actions = "turn_right" or "turn_left" or "move_forward"
# returns a bool showing if collided or not
return self._act(action_name, actuation)
def get_full_state(self):
# Returns habitat_sim.agent.AgentState
return self.agent.get_state()
def _rot_matrix(self, habitat_quat):
quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
return prutil.quat_to_rot_mat(quat_list)
def get_state(self, state_type="odom"):
# Returns (x, y, yaw)
assert state_type == "odom", "Error: Only Odom state is available"
cur_state = self.get_full_state()
init_rotation = self._rot_matrix(self.init_state.rotation)
# true position here refers to the relative position from
# where `self.init_state` is treated as origin
true_position = cur_state.position - self.init_state.position
true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
cur_rotation = self._rot_matrix(cur_state.rotation)
cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
(r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
# Habitat has y perpendicular to the map, whereas ROS has z perpendicular
# to the map; the x-axis is the same.
# Here ROS_X = -1 * habitat_z and ROS_Y = -1 * habitat_x
return (-1 * true_position[2], -1 * true_position[0], yaw)
def stop(self):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def set_vel(self, fwd_speed, turn_speed, exe_time=1):
raise NotImplementedError("Veclocity control is not supported in Habitat-Sim!!")
def go_to_relative(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
Moves the robot to the given
goal state relative to its initial pose.
:param xyt_position: The relative goal state of the form (x,y,t)
:param use_map: When set to "True", ensures that the controller
uses only free space on the map to move the robot.
:param close_loop: When set to "True", ensures that the controller
operates in closed loop by
taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
abs_yaw = cur_yaw + xyt_position[2]
return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)
def go_to_absolute(
self, xyt_position, use_map=False, close_loop=False, smooth=False
):
"""
Moves the robot to the given goal state in the world frame.
:param xyt_position: The goal state of the form (x,y,t)
in the world (map) frame.
:param use_map: When set to "True", ensures that the controller uses
only free space on the map to move the robot.
:param close_loop: When set to "True", ensures that the controller
operates in closed loop by
taking account of odometry.
:param smooth: When set to "True", ensures that the motion
leading to the goal is a smooth one.
:type xyt_position: list
:type use_map: bool
:type close_loop: bool
:type smooth: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
try:
if use_map:
raise NotImplementedError(
"Using map feature is not yet supported for Habitat-Sim"
)
if close_loop:
raise NotImplementedError(
"Closed-loop postion control is not supported in Habitat-Sim!"
)
if smooth:
raise NotImplementedError(
"Smooth position control feature is not yet for Habitat-Sim"
)
except Exception as error:
print(error)
return False
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_X = xyt_position[0] - cur_x
rel_Y = xyt_position[1] - cur_y
abs_yaw = xyt_position[2]
# convert rel_X & rel_Y from global frame to current frame
R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
[-np.sin(cur_yaw), np.cos(cur_yaw)]])
rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1,1))
return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)
def _act(self, action_name, actuation):
"""Take the action specified by action_id
:param action_id: ID of the action. Retreives the action from
`agent_config.action_space <AgentConfiguration.action_space>`
:return: Whether or not the action taken resulted in a collision
"""
did_collide = False
act_spec = ActuationSpec(actuation)
did_collide = self.agent.controls.action(
self.agent.scene_node, action_name, act_spec, apply_filter=True
)
return did_collide
def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
# clip relative movements beyond 10 micrometer precision
# this is done to improve determinism, as habitat-sim doesn't
# seem to move the robot precisely beyond sub-millimeter precision anyway
if abs(rel_x) < 1e-5:
rel_x = 0
if abs(rel_y) < 1e-5:
rel_y = 0
if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
# rotate to point to (x, y) point
action_name = "turn_left"
if rel_y < 0.0:
action_name = "turn_right"
v1 = np.asarray([1, 0], dtype=np.float64)
v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cosine_angle)
did_collide = self._act(action_name, math.degrees(angle))
if did_collide:
print("Error: Collision accured while 1st rotating!")
return False
# move to (x,y) point
did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
if did_collide:
print("Error: Collision accured while moving straight!")
return False
# rotate to match the final yaw!
(cur_x, cur_y, cur_yaw) = self.get_state()
rel_yaw = abs_yaw - cur_yaw
# clip to micro-degree precision to preserve determinism
if abs(rel_yaw) < 1e-4:
rel_yaw = 0
action_name = "turn_left"
if rel_yaw < 0.0:
action_name = "turn_right"
rel_yaw *= -1
did_collide = self._act(action_name, math.degrees(rel_yaw))
if did_collide:
print("Error: Collision accured while rotating!")
return False
return True
def track_trajectory(self, states, controls, close_loop):
"""
State trajectory that the robot should track.
:param states: sequence of (x,y,t) states that the robot should track.
:param controls: optionally specify control sequence as well.
:param close_loop: whether to close loop on the
computed control sequence or not.
:type states: list
:type controls: list
:type close_loop: bool
:return: True if successful; False otherwise (timeout, etc.)
:rtype: bool
"""
raise NotImplementedError
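# --- Hedged usage sketch (illustration only; not part of the original module).
# `configs` and `simulator` are placeholders for the PyRobot config object and
# the habitat_sim-backed simulator wrapper that normally construct this class.
def _example_relative_motion(configs, simulator):
    base = LoCoBotBase(configs, simulator)
    # Move 1 m forward and 0.5 m to the left, then face 90 degrees to the left,
    # all relative to the robot's current pose.
    reached = base.go_to_relative([1.0, 0.5, math.pi / 2])
    print("reached goal:", reached, "state (x, y, yaw):", base.get_state())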
|
nanpy/bmp180.py | AFTC-1/Arduino-rpi | 178 | 15135 | <filename>nanpy/bmp180.py
from __future__ import division
import logging
from nanpy.i2c import I2C_Master
from nanpy.memo import memoized
import time
log = logging.getLogger(__name__)
def to_s16(n):
return (n + 2 ** 15) % 2 ** 16 - 2 ** 15
class Bmp180(object):
"""Control of BMP180 Digital pressure sensor (I2C)
calculation is based on Bosch datasheet."""
def __init__(self, wire, address=0x77, oss=3):
self.i2c = I2C_Master(wire)
self.address = address
self.oss = oss
def read_bytes(self, address, count):
self.i2c.send(self.address, [address])
x = self.i2c.request(self.address, count)
return x
def write_byte(self, address, data):
self.i2c.send(self.address, [address, data])
@property
@memoized
def eeprom(self):
return self.read_bytes(0xaa, 22)
def read_temperature_raw(self):
self.write_byte(0xf4, 0x2e)
time.sleep(0.005)
MSB, LSB = self.read_bytes(0xf6, 2)
UT = (MSB << 8) + LSB
return UT
def read_pressure_raw(self):
self.write_byte(0xf4, 0x34 + (self.oss << 6))
time.sleep(0.005)
MSB, LSB, XLSB = self.read_bytes(0xf6, 3)
UP = ((MSB << 16) + (LSB << 8) + XLSB) >> (8 - self.oss)
return UP
@classmethod
def calculate(cls, pressure_raw, temperature_raw, oss, eeprom):
'''
return: Pascal, Celsius
'''
UT = temperature_raw
UP = pressure_raw
def ushort(i):
return (eeprom[2 * i] << 8) + eeprom[2 * i + 1]
def short(i):
return to_s16(ushort(i))
AC1 = short(0)
AC2 = short(1)
AC3 = short(2)
AC4 = ushort(3)
AC5 = ushort(4)
AC6 = ushort(5)
B1 = short(6)
B2 = short(7)
# MB = short(8)
MC = short(9)
MD = short(10)
X1 = ((UT - AC6) * AC5) >> 15
X2 = (MC << 11) // (X1 + MD)
B5 = X1 + X2
T = (B5 + 8) >> 4
B6 = B5 - 4000
X1 = (B2 * ((B6 * B6) >> 12)) >> 11
X2 = (AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((AC1 * 4 + X3) << oss) + 2) // 4
X1 = (AC3 * B6) >> 13
X2 = (B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) // 4
B4 = (AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> oss)
p = (B7 * 2) // B4 if B7 < 0x80000000 else (B7 // B4) * 2
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
p += (X1 + X2 + 3791) >> 4
return p, T / 10
def read(self):
'''
return: Pascal, Celsius
'''
temperature_raw = self.read_temperature_raw()
pressure_raw = self.read_pressure_raw()
return self.calculate(
pressure_raw,
temperature_raw,
self.oss,
self.eeprom,
)
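# --- Hedged usage sketch (illustration only; not part of the original module).
# `wire` is a placeholder for the nanpy Wire/I2C connection object that the
# constructor above expects; the concrete wiring depends on your setup.
def _example_read(wire):
    sensor = Bmp180(wire, address=0x77, oss=3)
    pressure_pa, temperature_c = sensor.read()
    print('pressure: %.0f Pa, temperature: %.1f C' % (pressure_pa, temperature_c))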
|
qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 198 | 15146 | <gh_stars>100-1000
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
class Order(object):
"""
Order generated by a strategy, then processed by PositionSizer.
Finally executed by ExecutionHandler.
"""
def __init__(self, contract: Contract, quantity: int, execution_style: ExecutionStyle,
time_in_force: TimeInForce, order_state=""):
"""
This __init__ shouldn't be used anywhere beyond this module. Use OrderFactory for creating Order objects.
"""
self.id = None # type:int
self.contract = contract
self.quantity = quantity
self.time_in_force = time_in_force
self.execution_style = execution_style
self.order_state = order_state
def __str__(self):
return '\nOrder:\n' \
'\tid: {}\n' \
'\tcontract: {}\n' \
'\tquantity: {}\n' \
'\ttif: {}\n' \
'\texecution_style: {}\n' \
'\torder_state: {}'.format(self.id, str(self.contract), self.quantity, str(self.time_in_force),
self.execution_style, self.order_state)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Order):
return False
# one Order has id and another hasn't
if (self.id is None) != (other.id is None):
return False
if self.id is not None and other.id == self.id:
return True
# when both ids are none -> compare the values
return (self.contract, self.quantity, self.time_in_force, self.execution_style) == \
(other.contract, other.quantity, other.time_in_force, other.execution_style)
def __hash__(self):
return hash((self.contract, self.quantity, self.time_in_force, self.execution_style))
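# --- Hedged usage sketch (illustration only; not part of the original module).
# `contract`, `execution_style` and `time_in_force` are placeholders for the
# concrete Contract, ExecutionStyle and TimeInForce instances the backtester
# provides; the point is only to illustrate the equality rules defined above.
def _example_equality(contract, execution_style, time_in_force):
    a = Order(contract, 100, execution_style, time_in_force)
    b = Order(contract, 100, execution_style, time_in_force)
    assert a == b               # neither has an id yet -> compared field by field
    assert hash(a) == hash(b)   # the hash ignores the id by design
    a.id = 1
    assert a != b               # one order has an id, the other does not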
|
hallucinate/api.py | SySS-Research/hallucinate | 199 | 15148 | class BaseHandler:
def send(self, data, p):
pass
def recv(self, data, p):
pass
def shutdown(self, p, direction=2):
pass
def close(self):
pass
|
seq2seq-chatbot/chat_web.py | rohitkujur1997/chatbot | 104 | 15151 | <reponame>rohitkujur1997/chatbot<filename>seq2seq-chatbot/chat_web.py
"""
Script for serving a trained chatbot model over http
"""
import datetime
import click
from os import path
from flask import Flask, request, send_from_directory
from flask_cors import CORS
from flask_restful import Resource, Api
import general_utils
import chat_command_handler
from chat_settings import ChatSettings
from chatbot_model import ChatbotModel
from vocabulary import Vocabulary
app = Flask(__name__)
CORS(app)
@app.cli.command()
@click.argument("checkpointfile")
@click.option("-p", "--port", type=int)
def serve_chat(checkpointfile, port):
api = Api(app)
#Read the hyperparameters and configure paths
model_dir, hparams, checkpoint = general_utils.initialize_session_server(checkpointfile)
#Load the vocabulary
print()
print ("Loading vocabulary...")
if hparams.model_hparams.share_embedding:
shared_vocab_filepath = path.join(model_dir, Vocabulary.SHARED_VOCAB_FILENAME)
input_vocabulary = Vocabulary.load(shared_vocab_filepath)
output_vocabulary = input_vocabulary
else:
input_vocab_filepath = path.join(model_dir, Vocabulary.INPUT_VOCAB_FILENAME)
input_vocabulary = Vocabulary.load(input_vocab_filepath)
output_vocab_filepath = path.join(model_dir, Vocabulary.OUTPUT_VOCAB_FILENAME)
output_vocabulary = Vocabulary.load(output_vocab_filepath)
#Create the model
print ("Initializing model...")
print()
with ChatbotModel(mode = "infer",
model_hparams = hparams.model_hparams,
input_vocabulary = input_vocabulary,
output_vocabulary = output_vocabulary,
model_dir = model_dir) as model:
#Load the weights
print()
print ("Loading model weights...")
model.load(checkpoint)
# Setting up the chat
chatlog_filepath = path.join(model_dir, "chat_logs", "web_chatlog_{0}.txt".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
chat_settings = ChatSettings(hparams.model_hparams, hparams.inference_hparams)
chat_command_handler.print_commands()
class Answer(Resource):
def get(self, question):
is_command, terminate_chat, _ = chat_command_handler.handle_command(question, model, chat_settings)
if terminate_chat:
answer = "[Can't terminate from http request]"
elif is_command:
answer = "[Command processed]"
else:
#If it is not a command (it is a question), pass it on to the chatbot model to get the answer
_, answer = model.chat(question, chat_settings)
if chat_settings.inference_hparams.log_chat:
chat_command_handler.append_to_chatlog(chatlog_filepath, question, answer)
return answer
class UI(Resource):
def get(self):
return send_from_directory(".", "chat_ui.html")
api.add_resource(Answer, "/chat/<string:question>")
api.add_resource(UI, "/chat_ui/")
app.run(debug=False, port=port) |
datar/base/trig_hb.py | stjordanis/datar | 110 | 15179 | <filename>datar/base/trig_hb.py<gh_stars>100-1000
"""Trigonometric and Hyperbolic Functions"""
from typing import Callable
import numpy
from pipda import register_func
from ..core.contexts import Context
from ..core.types import FloatOrIter
from .constants import pi
def _register_trig_hb_func(name: str, np_name: str, doc: str) -> Callable:
"""Register trigonometric and hyperbolic function"""
np_fun = getattr(numpy, np_name)
if name.endswith("pi"):
func = lambda x: np_fun(x * pi)
else:
# ufunc cannot set context
func = lambda x: np_fun(x)
func = register_func(None, context=Context.EVAL, func=func)
func.__name__ = name
func.__doc__ = doc
return func
sin = _register_trig_hb_func(
"sin",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable
Returns:
The sine value of `x`
""",
)
cos = _register_trig_hb_func(
"cos",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable
Returns:
The cosine value of `x`
""",
)
tan = _register_trig_hb_func(
"tan",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable
Returns:
The tangent value of `x`
""",
)
acos = _register_trig_hb_func(
"acos",
"arccos",
doc="""The arc-cosine function
Args:
x: a numeric value or iterable
Returns:
The arc-cosine value of `x`
""",
)
asin = _register_trig_hb_func(
"acos",
"arcsin",
doc="""The arc-sine function
Args:
x: a numeric value or iterable
Returns:
The arc-sine value of `x`
""",
)
atan = _register_trig_hb_func(
"acos",
"arctan",
doc="""The arc-sine function
Args:
x: a numeric value or iterable
Returns:
The arc-sine value of `x`
""",
)
sinpi = _register_trig_hb_func(
"sinpi",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The sine value of `x`
""",
)
cospi = _register_trig_hb_func(
"cospi",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The cosine value of `x`
""",
)
tanpi = _register_trig_hb_func(
"tanpi",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The tangent value of `x`
""",
)
cosh = _register_trig_hb_func(
"cosh",
"cosh",
doc="""Hyperbolic cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic cosine value of `x`
""",
)
sinh = _register_trig_hb_func(
"sinh",
"sinh",
doc="""Hyperbolic sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic sine value of `x`
""",
)
tanh = _register_trig_hb_func(
"tanh",
"tanh",
doc="""Hyperbolic tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic tangent value of `x`
""",
)
acosh = _register_trig_hb_func(
"acosh",
"arccosh",
doc="""Hyperbolic arc-cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-cosine value of `x`
""",
)
asinh = _register_trig_hb_func(
"asinh",
"arcsinh",
doc="""Hyperbolic arc-sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-sine value of `x`
""",
)
atanh = _register_trig_hb_func(
"atanh",
"arctanh",
doc="""Hyperbolic arc-tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-tangent value of `x`
""",
)
@register_func(None, context=Context.EVAL)
def atan2(y: FloatOrIter, x: FloatOrIter) -> FloatOrIter:
"""Calculates the angle between the x-axis and the vector (0,0) -> (x,y)
Args:
y: and
x: The end coordinates of the vector
Returns:
The angle between x-axis and vector (0,0) -> (x,y)
"""
return numpy.arctan2(y, x)
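# --- Hedged usage sketch (illustration only; not part of the original module).
# It assumes the pipda-registered wrappers above can be called eagerly with
# plain numeric values, in which case they behave like the underlying numpy
# ufuncs (the *pi variants scale their argument by pi first).
def _example_trig():
    print(sin(pi / 2))      # ~1.0
    print(sinpi(0.5))       # same value, argument given in multiples of pi
    print(atan2(1.0, 1.0))  # ~pi/4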
|
tests/unit/cli/test_repo.py | tehlingchu/anchore-cli | 110 | 15217 | from anchorecli.cli import repo
|
src/pyrobot/vrep_locobot/camera.py | gujralsanyam22/pyrobot | 2,150 | 15230 | <reponame>gujralsanyam22/pyrobot
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyrobot.utils.util as prutil
from pyrobot.core import Camera
from pyrobot.utils.util import try_cv2_import
cv2 = try_cv2_import()
from cv_bridge import CvBridge, CvBridgeError
from pyrep.objects.vision_sensor import VisionSensor
from pyrep.const import ObjectType, PerspectiveMode, RenderMode
from pyrep.objects.joint import Joint
class LoCoBotCamera(Camera):
"""docstring for SimpleCamera"""
def __init__(self, configs, simulator):
self.sim = simulator.sim
self.configs = configs
self.rgb_cam = VisionSensor("kinect_rgb")
self.depth_cam = VisionSensor("kinect_depth")
self.rgb_cam.set_render_mode(RenderMode.OPENGL3)
self.depth_cam.set_render_mode(RenderMode.OPENGL3)
# Pan and tilt related variables.
self.pan_joint = Joint("LoCoBot_head_pan_joint")
self.tilt_joint = Joint("LoCoBot_head_tilt_joint")
def get_rgb(self):
return self.rgb_cam.capture_rgb()
def get_depth(self):
return self.depth_cam.capture_depth()
def get_rgb_depth(self):
return self.get_rgb(), self.get_depth()
def get_intrinsics(self):
# Todo: Remove this after we fix intrinsics
raise NotImplementedError
"""
Returns the instrinsic matrix of the camera
:return: the intrinsic matrix (shape: :math:`[3, 3]`)
:rtype: np.ndarray
"""
# fx = self.configs['Camera.fx']
# fy = self.configs['Camera.fy']
# cx = self.configs['Camera.cx']
# cy = self.configs['Camera.cy']
Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
return Itc
def pix_to_3dpt(self, rs, cs, in_cam=False):
"""
Get the 3D points of the pixels in RGB images.
:param rs: rows of interest in the RGB image.
It can be a list or 1D numpy array
which contains the row indices.
The default value is None,
which means all rows.
:param cs: columns of interest in the RGB image.
It can be a list or 1D numpy array
which contains the column indices.
The default value is None,
which means all columns.
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame
(shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam
(shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
raise NotImplementedError
def get_current_pcd(self, in_cam=True):
"""
Return the point cloud at current time step (one frame only)
:param in_cam: return points in camera frame,
otherwise, return points in base frame
:type in_cam: bool
:returns: tuple (pts, colors)
pts: point coordinates in world frame (shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
raise NotImplementedError
@property
def state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list the form [pan angle, tilt angle]
:rtype: list
"""
return self.get_state()
def get_state(self):
"""
Return the current pan and tilt joint angles of the robot camera.
:return:
pan_tilt: A list the form [pan angle, tilt angle]
:rtype: list
"""
return [self.get_pan(), self.get_tilt()]
def get_pan(self):
"""
Return the current pan joint angle of the robot camera.
:return:
pan: Pan joint angle
:rtype: float
"""
return self.pan_joint.get_joint_position()
def get_tilt(self):
"""
Return the current tilt joint angle of the robot camera.
:return:
tilt: Tilt joint angle
:rtype: float
"""
return self.tilt_joint.get_joint_position()
def set_pan(self, pan, wait=True):
"""
Sets the pan joint angle to the specified value.
:param pan: value to be set for pan joint
:param wait: wait until the pan angle is set to
the target angle.
:type pan: float
:type wait: bool
"""
self.pan_joint.set_joint_position(pan)
# [self.sim.step() for _ in range(50)]
def set_tilt(self, tilt, wait=True):
"""
Sets the tilt joint angle to the specified value.
:param tilt: value to be set for the tilt joint
:param wait: wait until the tilt angle is set to
the target angle.
:type tilt: float
:type wait: bool
"""
self.tilt_joint.set_joint_position(tilt)
def set_pan_tilt(self, pan, tilt, wait=True):
"""
Sets both the pan and tilt joint angles to the specified values.
:param pan: value to be set for pan joint
:param tilt: value to be set for the tilt joint
:param wait: wait until the pan and tilt angles are set to
the target angles.
:type pan: float
:type tilt: float
:type wait: bool
"""
self.set_pan(pan)
self.set_tilt(tilt)
def reset(self):
"""
This function resets the pan and tilt joints by actuating
them to their home configuration.
"""
self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)
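# --- Hedged usage sketch (illustration only; not part of the original module).
# `configs` and `simulator` are placeholders for the PyRobot config object and
# the PyRep-backed simulator wrapper that normally construct this class.
def _example_pan_tilt(configs, simulator):
    camera = LoCoBotCamera(configs, simulator)
    camera.set_pan_tilt(0.2, -0.4)  # pan and tilt angles in radians
    rgb, depth = camera.get_rgb_depth()
    print('pan/tilt state:', camera.get_state(), 'rgb shape:', rgb.shape)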
|
packs/hue/actions/rgb.py | jonico/st2contrib | 164 | 15278 | from lib import action
class RGBAction(action.BaseAction):
def run(self, light_id, red, green, blue, transition_time):
light = self.hue.lights.get(light_id)
light.rgb(red, green, blue, transition_time)
|
Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/base.py | neopenx/Dragon | 212 | 15301 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import time
import shutil
from six.moves import urllib
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
factor: each subsequent retry, the delay is multiplied by this value.
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
max_delay: the maximum delay allowed (actual max is
max_delay * (1 + jitter).
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except)
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.exists(work_directory):
os.makedirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
shutil.copy(temp_file_name, filepath)
size = os.path.getsize(filepath)
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
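# --- Hedged usage sketch (illustration only; not part of the original module):
# decorating an arbitrary flaky function with the retry() helper defined above.
# `_flaky_read` is a made-up example; only exceptions accepted by the
# is_retriable callback (here, the retriable IOErrors) trigger another attempt.
@retry(initial_delay=1.0, max_delay=8.0, is_retriable=_is_retriable)
def _flaky_read(path):
  with open(path, 'rb') as f:
    return f.read()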
|
Algorithm.Python/OptionDataNullReferenceRegressionAlgorithm.py | BlackBoxAM/Lean | 6,580 | 15316 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This algorithm is a regression test for issue #2018 and PR #2038.
### </summary>
class OptionDataNullReferenceRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2016, 12, 1)
self.SetEndDate(2017, 1, 1)
self.SetCash(500000)
self.AddEquity("DUST")
option = self.AddOption("DUST")
option.SetFilter(self.UniverseFunc)
def UniverseFunc(self, universe):
return universe.IncludeWeeklys().Strikes(-1, +1).Expiration(timedelta(25), timedelta(100))
|
examples/sharedlinks/sharedlinks-backend/links/models.py | gcbirzan/django-rest-registration | 329 | 15317 | from django.db import models
from django.contrib.auth.models import User
class Link(models.Model):
url = models.URLField()
title = models.CharField(max_length=255)
reporter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='reported_links',
null=True,
blank=False,
)
def __str__(self):
return '{self.title} ({self.url})'.format(self=self)
def get_num_of_positive_votes(self):
return self.votes.filter(positive=True).count()
def get_num_of_negative_votes(self):
return self.votes.filter(negative=True).count()
class LinkVote(models.Model):
class Meta:
unique_together = (
('link', 'voter'),
)
link = models.ForeignKey(
Link,
on_delete=models.CASCADE,
related_name='votes',
)
voter = models.ForeignKey(
User,
on_delete=models.SET_NULL,
related_name='votes',
null=True,
blank=False,
)
positive = models.BooleanField()
negative = models.BooleanField()
def __str__(self):
if self.positive:
vote = 'positive'
elif self.negative:
vote = 'negative'
else:
vote = 'neutral'
return '{vote} vote for {self.link} by {self.voter}'.format(
vote=vote, self=self)
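# --- Hedged usage sketch (illustration only; not part of the original module).
# Assumes a configured Django project with migrations applied and an existing
# User instance; it only illustrates the vote counters defined above.
def _example_votes(user):
    link = Link.objects.create(url='https://example.com', title='Example', reporter=user)
    LinkVote.objects.create(link=link, voter=user, positive=True, negative=False)
    print(link.get_num_of_positive_votes(), link.get_num_of_negative_votes())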
|
clint/textui/core.py | mpmman/clint | 1,230 | 15323 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
clint.textui.core
~~~~~~~~~~~~~~~~~
Core TextUI functionality for Puts/Indent/Writer.
"""
from __future__ import absolute_import
import sys
from contextlib import contextmanager
from .formatters import max_width, min_width, _get_max_width_context
from .cols import columns
from ..utils import tsplit
__all__ = ('puts', 'puts_err', 'indent', 'dedent', 'columns', 'max_width',
'min_width', 'STDOUT', 'STDERR')
STDOUT = sys.stdout.write
STDERR = sys.stderr.write
NEWLINES = ('\n', '\r', '\r\n')
INDENT_STRINGS = []
# Private
def _indent(indent=0, quote='', indent_char=' '):
"""Indent util function, compute new indent_string"""
if indent > 0:
indent_string = ''.join((
str(quote),
(indent_char * (indent - len(quote)))
))
else:
indent_string = ''.join((
('\x08' * (-1 * (indent - len(quote)))),
str(quote))
)
if len(indent_string):
INDENT_STRINGS.append(indent_string)
# Public
def puts(s='', newline=True, stream=STDOUT):
"""Prints given string to stdout."""
max_width_ctx = _get_max_width_context()
if max_width_ctx:
cols, separator = max_width_ctx[-1]
s = max_width(s, cols, separator)
if newline:
s = tsplit(s, NEWLINES)
s = map(str, s)
indent = ''.join(INDENT_STRINGS)
s = (str('\n' + indent)).join(s)
_str = ''.join((
''.join(INDENT_STRINGS),
str(s),
'\n' if newline else ''
))
stream(_str)
def puts_err(s='', newline=True, stream=STDERR):
"""Prints given string to stderr."""
puts(s, newline, stream)
def dedent():
"""Dedent next strings, use only if you use indent otherwise than as a
context."""
INDENT_STRINGS.pop()
@contextmanager
def _indent_context():
"""Indentation context manager."""
try:
yield
finally:
dedent()
def indent(indent=4, quote=''):
"""Indentation manager, return an indentation context manager."""
_indent(indent, quote)
return _indent_context()
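# --- Hedged usage sketch (illustration only; not part of the original module):
# indent() returns a context manager, so nesting is handled by with-blocks and
# dedent() is called automatically when each block exits.
def _example_output():
    puts('top level')
    with indent(4, quote='> '):
        puts('quoted and indented')
        with indent(4):
            puts('nested one level deeper')
    puts('back at the left margin')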
|
private_market/test.py | sigmoid3/Dapper | 974 | 15352 | <reponame>sigmoid3/Dapper<gh_stars>100-1000
from ethereum import tester as t
from ethereum import utils
def test():
s = t.state()
test_company = s.abi_contract('company.se', ADMIN_ACCOUNT=utils.decode_int(t.a0))
order_book = s.abi_contract('orders.se')
test_currency = s.abi_contract('currency.se', sender=t.k0)
assert test_company.getAdmin() == t.a0.encode('hex')
# Issue 1000 shares to user a1
test_company.issueShares(1000, t.a1, sender=t.k0)
# Issue 50000 coins to users a2 and a3
test_currency.sendCoin(50000, t.a2, sender=t.k0)
test_currency.sendCoin(50000, t.a3, sender=t.k0)
# User a1 can have as many shares as he wants, but must retain at
# least 800
test_company.setShareholderMaxShares(t.a1, 2**100, sender=t.k0)
test_company.setShareholderMinShares(t.a1, 800, sender=t.k0)
# User a2 can have up to 500 shares
test_company.setShareholderMaxShares(t.a2, 500, sender=t.k0)
# User a2 tries to give himself the right to unlimited shares,
# fails because he is not the admin
test_company.setShareholderMaxShares(t.a2, 2**100, sender=t.k2)
# A few sanity checks
assert test_company.getCurrentShareholdingsOf(t.a1) == 1000
assert test_company.getShareholderMinShares(t.a1) == 800
assert test_company.getShareholderMaxShares(t.a2) == 500
# User a1 transfers 150 shares to a2
assert test_company.sendCoin(150, t.a2, sender=t.k1) is True
# User a1 tries to transfer 150 shares to a2 again, fails because
# such a transaction would result a1 having 700 shares, which is
# below his limit
assert test_company.sendCoin(150, t.a2, sender=t.k1) is False
# Check shareholdings
assert test_company.getCurrentShareholdingsOf(t.a1) == 850
assert test_company.getCurrentShareholdingsOf(t.a2) == 150
# Authorize the order book contract to accept lockups
test_company.setContractAuthorized(order_book.address, True)
# User a1 puts up 50 shares for sale; however, he tries to do
# this without first authorizing the order book to withdraw so
# the operation fails
assert order_book.mkSellOrder(test_company.address, 50,
test_currency.address, 10000,
sender=t.k1) == -1
# Now, try to create the order properly
test_company.authorizeLockup(order_book.address, 50, sender=t.k1)
_id = order_book.mkSellOrder(test_company.address, 50,
test_currency.address, 10000, sender=t.k1)
assert _id >= 0
assert test_company.getLockedShareholdingsOf(t.a1) == 50
# Accept the order by a3. This should fail because a3 has not
# authorized the order_book to withdraw coins
assert order_book.claimSellOrder(_id, sender=t.k3) is False
# Do the authorization
test_currency.approveOnce(order_book.address, 10000, sender=t.k3)
# It should still fail because a3 is not authorized to hold shares
assert order_book.claimSellOrder(_id, sender=t.k3) is False
# Now do it properly
test_currency.approveOnce(order_book.address, 10000, sender=t.k2)
assert order_book.claimSellOrder(_id, sender=t.k2) is True
# Check shareholdings and balances
assert test_company.getCurrentShareholdingsOf(t.a1) == 800
assert test_company.getCurrentShareholdingsOf(t.a2) == 200
assert test_company.getLockedShareholdingsOf(t.a1) == 0
assert test_currency.coinBalanceOf(t.a1) == 10000
assert test_currency.coinBalanceOf(t.a2) == 40000
assert test_currency.coinBalanceOf(t.a3) == 50000
# Authorize a3 to hold shares
test_company.setShareholderMaxShares(t.a3, 500)
# A3 buys shares
test_currency.approveOnce(order_book.address, 20000, sender=t.k3)
_id2 = order_book.mkBuyOrder(test_company.address, 100,
test_currency.address, 20000, sender=t.k3)
assert _id2 >= 0, _id2
test_company.authorizeLockup(order_book.address, 100, sender=t.k2)
assert order_book.claimBuyOrder(_id2, sender=t.k2) is True
# Check shareholdings and balances
assert test_company.getCurrentShareholdingsOf(t.a1) == 800
assert test_company.getCurrentShareholdingsOf(t.a2) == 100
assert test_company.getCurrentShareholdingsOf(t.a3) == 100
assert test_company.getLockedShareholdingsOf(t.a1) == 0
assert test_currency.coinBalanceOf(t.a1) == 10000
assert test_currency.coinBalanceOf(t.a2) == 60000
assert test_currency.coinBalanceOf(t.a3) == 30000
if __name__ == '__main__':
test()
|
doc/python_api/examples/bpy.types.Depsgraph.1.py | rbabari/blender | 365 | 15373 | <gh_stars>100-1000
"""
Dependency graph: Evaluated ID example
++++++++++++++++++++++++++++++++++++++
This example demonstrates access to the evaluated ID (such as object, material, etc.) state from
an original ID.
This is needed every time one needs to access state with animation, constraints, and modifiers
taken into account.
"""
import bpy
class OBJECT_OT_evaluated_example(bpy.types.Operator):
"""Access evaluated object state and do something with it"""
bl_label = "DEG Access Evaluated Object"
bl_idname = "object.evaluated_example"
def execute(self, context):
# This is an original object. Its data does not have any modifiers applied.
obj = context.object
if obj is None or obj.type != 'MESH':
self.report({'INFO'}, "No active mesh object to get info from")
return {'CANCELLED'}
# Evaluated object exists within a specific dependency graph.
# We will request evaluated object from the dependency graph which corresponds to the
# current scene and view layer.
#
# NOTE: This call ensures the dependency graph is fully evaluated. This might be expensive
# if changes were made to the scene, but is needed to ensure no dangling or incorrect
# pointers are exposed.
depsgraph = context.evaluated_depsgraph_get()
# Actually request evaluated object.
#
# This object has animation and drivers applied on it, together with constraints and
# modifiers.
#
# For mesh objects the object.data will be a mesh with all modifiers applied.
# This means that access to vertices or faces after the modifier stack is applied
# happens via fields of object_eval.data.
#
# For other types of objects the object_eval.data does not have modifiers applied on it,
# but has animation applied.
#
# NOTE: All ID types have `evaluated_get()`, including materials, node trees, worlds.
object_eval = obj.evaluated_get(depsgraph)
mesh_eval = object_eval.data
self.report({'INFO'}, f"Number of evaluated vertices: {len(mesh_eval.vertices)}")
return {'FINISHED'}
def register():
bpy.utils.register_class(OBJECT_OT_evaluated_example)
def unregister():
bpy.utils.unregister_class(OBJECT_OT_evaluated_example)
if __name__ == "__main__":
register()
|
setup.py | zhuzhenping/hf_at_py | 130 | 15385 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/20 8:15
# @Author : HaiFeng
# @Email : <EMAIL>
from setuptools import setup
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
# Read the contents of a file
def read_file(filename):
with open(os.path.join(this_directory, filename), encoding='utf-8') as f:
desc = f.read()
return desc
# Collect the dependencies
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
long_description = read_file('readme.md')
long_description_content_type = 'text/markdown'  # the package documentation is in markdown
# talib does not need to be added; os.system('pipreqs . --encoding=utf8 --force')  # generates requirements.txt
setup(
name='hfpy',  # package name
python_requires='>=3.6.0',  # required Python version
version='0.2.2',  # package version
description="Hai Feng Future Trading Platform with SE",  # short summary shown on PyPI
long_description=long_description,  # contents of the readme file
long_description_content_type=long_description_content_type,  # the documentation format is markdown
author="HaiFeng",  # author information
author_email='<EMAIL>',
url='https://github.com/haifengat/hf_at_py',
# Specify the packages; the find_packages() helper could be used instead
# packages=find_packages(),
packages=['hfpy'],
install_requires=read_requirements('requirements.txt'),  # dependencies that need to be installed
include_package_data=True,
license="MIT License",
platforms="any",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
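# --- Hedged usage note (illustration only; not part of the original file):
# with this setup.py, distributions are typically built and uploaded with the
# standard tooling, e.g.
#   python setup.py sdist bdist_wheel
#   twine upload dist/*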
|
scripts/download_lookml.py | orf/lkml | 110 | 15532 | <filename>scripts/download_lookml.py
import os
import re
from base64 import b64decode
from pathlib import Path
import requests
username = os.environ["GITHUB_USERNAME"]
password = os.environ["GITHUB_PERSONAL_ACCESS_TOKEN"]
auth = requests.auth.HTTPBasicAuth(username, password)
directory = Path(__file__).resolve().parent.parent / "github"
directory.mkdir(exist_ok=True)
start_url = "https://api.github.com/search/code?q=view+language:lookml"
next_url = None
page = 1
with requests.Session() as session:
session.auth = auth
while True:
response = session.get(next_url or start_url)
response.raise_for_status()
links = response.headers["Link"]
finds = re.findall(
r"<(https://api.github.com/search/code\?"
r'q=view\+language%3Alookml&page=\d+)>; rel="next"',
links,
)
if finds:
next_url = finds[0]
else:
next_url = None
print(next_url)
urls = [item["url"] for item in response.json()["items"]]
print(f"Downloading all content from page {page}")
for url in urls:
response = session.get(url)
response.raise_for_status()
response_json = response.json()
name = response_json["name"]
encoded = response_json["content"]
content = b64decode(encoded).decode("utf-8")
if (
name.endswith(".lookml")
or content.startswith("-")
or "- view" in content
):
continue
file_path = directory / name
with file_path.open("w+") as file:
file.write(content)
if next_url is None:
break
else:
page += 1
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py | CCI-MOC/ABMI | 108 | 15543 | <reponame>CCI-MOC/ABMI<filename>m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py
import logging
from iemit_plugin import IEmitter
from plugins.emitters.base_http_emitter import BaseHttpEmitter
logger = logging.getLogger('crawlutils')
class HttpEmitter(BaseHttpEmitter, IEmitter):
def get_emitter_protocol(self):
return 'http'
|
chainer_chemistry/links/update/ggnn_update.py | pfnet/chainerchem | 184 | 15549 | <gh_stars>100-1000
import chainer
from chainer import functions
from chainer import links
import chainer_chemistry
from chainer_chemistry.links.connection.graph_linear import GraphLinear
from chainer_chemistry.utils import is_sparse
class GGNNUpdate(chainer.Chain):
"""GGNN submodule for update part.
Args:
in_channels (int or None): input dim of feature vector for each node
hidden_channels (int): dimension of feature vector for each node
out_channels (int or None): output dim of the feature vector for each node.
When `None`, `hidden_channels` is used.
n_edge_types (int): number of types of edge
"""
def __init__(self, in_channels=None, hidden_channels=16,
out_channels=None, n_edge_types=4, **kwargs):
if out_channels is None:
out_channels = hidden_channels
super(GGNNUpdate, self).__init__()
if in_channels is None:
gru_in_channels = None
else:
gru_in_channels = in_channels + hidden_channels
with self.init_scope():
self.graph_linear = GraphLinear(
in_channels, n_edge_types * hidden_channels)
self.update_layer = links.GRU(gru_in_channels, out_channels)
self.n_edge_types = n_edge_types
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
def __call__(self, h, adj, **kwargs):
hidden_ch = self.hidden_channels
# --- Message part ---
mb, atom, in_ch = h.shape
m = functions.reshape(self.graph_linear(h),
(mb, atom, hidden_ch, self.n_edge_types))
# m: (minibatch, atom, ch, edge_type)
# Transpose
m = functions.transpose(m, (0, 3, 1, 2))
# m: (minibatch, edge_type, atom, ch)
# (minibatch * edge_type, atom, out_ch)
m = functions.reshape(m, (mb * self.n_edge_types, atom, hidden_ch))
if is_sparse(adj):
m = functions.sparse_matmul(adj, m)
else:
adj = functions.reshape(adj, (mb * self.n_edge_types, atom, atom))
m = chainer_chemistry.functions.matmul(adj, m)
# (minibatch * edge_type, atom, out_ch)
m = functions.reshape(m, (mb, self.n_edge_types, atom, hidden_ch))
m = functions.sum(m, axis=1)
# (minibatch, atom, out_ch)
# --- Update part ---
# Contraction
h = functions.reshape(h, (mb * atom, in_ch))
# Contraction
m = functions.reshape(m, (mb * atom, hidden_ch))
out_h = self.update_layer(functions.concat((h, m), axis=1))
# Expansion
out_h = functions.reshape(out_h, (mb, atom, self.out_channels))
return out_h
def reset_state(self):
self.update_layer.reset_state()
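# --- Hedged usage sketch (illustration only; not part of the original module):
# a minimal shape check with random inputs, assuming a dense adjacency tensor
# of shape (minibatch, n_edge_types, atom, atom) as consumed by __call__ above.
def _example_shapes():
    import numpy
    mb, atom, ch, n_edge_types = 2, 5, 8, 4
    update = GGNNUpdate(in_channels=ch, hidden_channels=16,
                        out_channels=ch, n_edge_types=n_edge_types)
    h = numpy.random.rand(mb, atom, ch).astype(numpy.float32)
    adj = numpy.random.randint(0, 2, (mb, n_edge_types, atom, atom)).astype(numpy.float32)
    out = update(h, adj)
    print(out.shape)  # (2, 5, 8)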
|
samples/ast/test.py | Ryoich/python_zero | 203 | 15550 | def func(a, b):
return a + b
def func2(a):
print(a)
print("Hello")
|
configs/models/aott.py | yoxu515/aot-benchmark | 105 | 15555 | import os
from .default import DefaultModelConfig
class ModelConfig(DefaultModelConfig):
def __init__(self):
super().__init__()
self.MODEL_NAME = 'AOTT'
|