code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
---|---|---|---|---|---|
# Generated by Django 2.1.12 on 2019-12-06 15:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('rdrf', '0118_auto_20191125_1514'),
]
operations = [
migrations.CreateModel(
name='CustomAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=80)),
('name', models.CharField(blank=True, max_length=80, null=True)),
('action_type', models.CharField(choices=[('PR', 'Patient Report')], max_length=2)),
('data', models.TextField(null=True)),
('groups_allowed', models.ManyToManyField(blank=True, to='auth.Group')),
('registry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rdrf.Registry')),
],
),
]
| muccg/rdrf | rdrf/rdrf/migrations/0119_customaction.py | Python | agpl-3.0 | 1,049 |
"""
wasi-sdk installation and maintenance
"""
from mod import log, wasisdk
def run(fips_dir, proj_dir, args):
if len(args) > 0:
cmd = args[0]
if cmd == 'install':
wasisdk.install(fips_dir)
elif cmd == 'uninstall':
wasisdk.uninstall(fips_dir)
else:
log.error("unknown subcommand '{}' (run './fips help wasisdk')".format(cmd))
else:
log.error("expected a subcommand (install or uninstall)")
def help():
log.info(log.YELLOW +
"fips wasisdk install\n"
"fips wasisdk uninstall\n"
+ log.DEF +
" install or uninstall the WASI SDK")
| floooh/fips | verbs/wasisdk.py | Python | mit | 682 |
from __future__ import absolute_import
import difflib
import errno
import functools
import io
import itertools
import getopt
import os, signal, subprocess, sys
import re
import stat
import platform
import shutil
import tempfile
import threading
import io
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from lit.ShCommands import GlobItem
import lit.ShUtil as ShUtil
import lit.Test as Test
import lit.util
from lit.util import to_bytes, to_string, to_unicode
from lit.BooleanExpression import BooleanExpression
class InternalShellError(Exception):
def __init__(self, command, message):
self.command = command
self.message = message
kIsWindows = platform.system() == 'Windows'
# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
kDevNull = "/dev/null"
# A regex that matches %dbg(ARG), which lit inserts at the beginning of each
# run command pipeline such that ARG specifies the pipeline's source line
# number. lit later expands each %dbg(ARG) to a command that behaves as a null
# command in the target shell so that the line number is seen in lit's verbose
# mode.
#
# This regex captures ARG. ARG must not contain a right parenthesis, which
# terminates %dbg. ARG must not contain quotes, in which ARG might be enclosed
# during expansion.
kPdbgRegex = '%dbg\\(([^)\'"]*)\\)'
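# For example, a script line rewritten by lit such as
#   %dbg(RUN: at line 12) FileCheck %s < %t
# matches this regex, with ARG captured as "RUN: at line 12".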
class ShellEnvironment(object):
"""Mutable shell environment containing things like CWD and env vars.
Tracks the current working directory and a per-command copy of the environment variables.
"""
def __init__(self, cwd, env):
self.cwd = cwd
self.env = dict(env)
class TimeoutHelper(object):
"""
Object used to help manage enforcing a timeout in
_executeShCmd(). It is passed through recursive calls
to collect processes that have been executed so that when
the timeout happens they can be killed.
"""
def __init__(self, timeout):
self.timeout = timeout
self._procs = []
self._timeoutReached = False
self._doneKillPass = False
# This lock will be used to protect concurrent access
# to _procs and _doneKillPass
self._lock = None
self._timer = None
def cancel(self):
if not self.active():
return
self._timer.cancel()
def active(self):
return self.timeout > 0
def addProcess(self, proc):
if not self.active():
return
needToRunKill = False
with self._lock:
self._procs.append(proc)
# Avoid re-entering the lock by finding out if kill needs to be run
# again here but call it if necessary once we have left the lock.
# We could use a reentrant lock here instead but this code seems
# clearer to me.
needToRunKill = self._doneKillPass
# The initial call to _kill() from the timer thread already happened so
# we need to call it again from this thread, otherwise this process
# will be left to run even though the timeout was already hit
if needToRunKill:
assert self.timeoutReached()
self._kill()
def startTimer(self):
if not self.active():
return
# Do some late initialisation that's only needed
# if there is a timeout set
self._lock = threading.Lock()
self._timer = threading.Timer(self.timeout, self._handleTimeoutReached)
self._timer.start()
def _handleTimeoutReached(self):
self._timeoutReached = True
self._kill()
def timeoutReached(self):
return self._timeoutReached
def _kill(self):
"""
This method may be called multiple times as we might get unlucky
and be in the middle of creating a new process in _executeShCmd()
which won't yet be in ``self._procs``. By locking here and in
addProcess() we should be able to kill processes launched after
the initial call to _kill()
"""
with self._lock:
for p in self._procs:
lit.util.killProcessAndChildren(p.pid)
# Empty the list and note that we've done a pass over the list
self._procs = [] # Python2 doesn't have list.clear()
self._doneKillPass = True
class ShellCommandResult(object):
"""Captures the result of an individual command."""
def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
outputFiles = []):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.exitCode = exitCode
self.timeoutReached = timeoutReached
self.outputFiles = list(outputFiles)
def executeShCmd(cmd, shenv, results, timeout=0):
"""
Wrapper around _executeShCmd that handles
timeout
"""
# Use the helper even when no timeout is required to make
# other code simpler (i.e. avoid bunch of ``!= None`` checks)
timeoutHelper = TimeoutHelper(timeout)
if timeout > 0:
timeoutHelper.startTimer()
finalExitCode = _executeShCmd(cmd, shenv, results, timeoutHelper)
timeoutHelper.cancel()
timeoutInfo = None
if timeoutHelper.timeoutReached():
timeoutInfo = 'Reached timeout of {} seconds'.format(timeout)
return (finalExitCode, timeoutInfo)
def expand_glob(arg, cwd):
if isinstance(arg, GlobItem):
return sorted(arg.resolve(cwd))
return [arg]
def expand_glob_expressions(args, cwd):
result = [args[0]]
for arg in args[1:]:
result.extend(expand_glob(arg, cwd))
return result
def quote_windows_command(seq):
"""
Reimplement Python's private subprocess.list2cmdline for MSys compatibility
Based on CPython implementation here:
https://hg.python.org/cpython/file/849826a900d2/Lib/subprocess.py#l422
Some core util distributions (MSys) don't tokenize command line arguments
the same way that MSVC CRT does. Lit rolls its own quoting logic similar to
the stock CPython logic to paper over these quoting and tokenization rule
differences.
We use the same algorithm from MSDN as CPython
(http://msdn.microsoft.com/en-us/library/17w5ykft.aspx), but we treat more
characters as needing quoting, such as double quotes themselves.
"""
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
# This logic differs from upstream list2cmdline.
needquote = (" " in arg) or ("\t" in arg) or ("\"" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
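# Illustrative example of the quoting above (hypothetical argument values):
#   quote_windows_command(['FileCheck', 'C:\\my tests\\expected.txt', 'say "hi"'])
# returns the single string:
#   FileCheck "C:\my tests\expected.txt" "say \"hi\""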
# cmd is export or env
def updateEnv(env, cmd):
arg_idx = 1
unset_next_env_var = False
for arg_idx, arg in enumerate(cmd.args[1:]):
# Support for the -u flag (unsetting) for env command
# e.g., env -u FOO -u BAR will remove both FOO and BAR
# from the environment.
if arg == '-u':
unset_next_env_var = True
continue
if unset_next_env_var:
unset_next_env_var = False
if arg in env.env:
del env.env[arg]
continue
# Partition the string into KEY=VALUE.
key, eq, val = arg.partition('=')
# Stop if there was no equals.
if eq == '':
break
env.env[key] = val
cmd.args = cmd.args[arg_idx+1:]
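# For example, given the (hypothetical) command "env -u HOME FOO=1 llc -o -",
# updateEnv() deletes HOME from env.env, sets env.env['FOO'] = '1', and rewrites
# cmd.args to ['llc', '-o', '-'] so the remaining words form the real command.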
def executeBuiltinEcho(cmd, shenv):
"""Interpret a redirected echo command"""
opened_files = []
stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv,
opened_files)
if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
raise InternalShellError(
cmd, "stdin and stderr redirects not supported for echo")
# Some tests have un-redirected echo commands to help debug test failures.
# Buffer our output and return it to the caller.
is_redirected = True
encode = lambda x : x
if stdout == subprocess.PIPE:
is_redirected = False
stdout = StringIO()
elif kIsWindows:
# Reopen stdout in binary mode to avoid CRLF translation. The versions
# of echo we are replacing on Windows all emit plain LF, and the LLVM
# tests now depend on this.
# When we open as binary, however, this also means that we have to write
# 'bytes' objects to stdout instead of 'str' objects.
encode = lit.util.to_bytes
stdout = open(stdout.name, stdout.mode + 'b')
opened_files.append((None, None, stdout, None))
# Implement echo flags. We only support -e and -n, and not yet in
# combination. We have to ignore unknown flags, because `echo "-D FOO"`
# prints the dash.
args = cmd.args[1:]
interpret_escapes = False
write_newline = True
while len(args) >= 1 and args[0] in ('-e', '-n'):
flag = args[0]
args = args[1:]
if flag == '-e':
interpret_escapes = True
elif flag == '-n':
write_newline = False
def maybeUnescape(arg):
if not interpret_escapes:
return arg
arg = lit.util.to_bytes(arg)
codec = 'string_escape' if sys.version_info < (3,0) else 'unicode_escape'
return arg.decode(codec)
if args:
for arg in args[:-1]:
stdout.write(encode(maybeUnescape(arg)))
stdout.write(encode(' '))
stdout.write(encode(maybeUnescape(args[-1])))
if write_newline:
stdout.write(encode('\n'))
for (name, mode, f, path) in opened_files:
f.close()
if not is_redirected:
return stdout.getvalue()
return ""
def executeBuiltinMkdir(cmd, cmd_shenv):
"""executeBuiltinMkdir - Create new directories."""
args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
try:
opts, args = getopt.gnu_getopt(args, 'p')
except getopt.GetoptError as err:
raise InternalShellError(cmd, "Unsupported: 'mkdir': %s" % str(err))
parent = False
for o, a in opts:
if o == "-p":
parent = True
else:
assert False, "unhandled option"
if len(args) == 0:
raise InternalShellError(cmd, "Error: 'mkdir' is missing an operand")
stderr = StringIO()
exitCode = 0
for dir in args:
cwd = cmd_shenv.cwd
dir = to_unicode(dir) if kIsWindows else to_bytes(dir)
cwd = to_unicode(cwd) if kIsWindows else to_bytes(cwd)
if not os.path.isabs(dir):
dir = os.path.realpath(os.path.join(cwd, dir))
if parent:
lit.util.mkdir_p(dir)
else:
try:
lit.util.mkdir(dir)
except OSError as err:
stderr.write("Error: 'mkdir' command failed, %s\n" % str(err))
exitCode = 1
return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
def executeBuiltinDiff(cmd, cmd_shenv):
"""executeBuiltinDiff - Compare files line by line."""
args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
try:
opts, args = getopt.gnu_getopt(args, "wbur", ["strip-trailing-cr"])
except getopt.GetoptError as err:
raise InternalShellError(cmd, "Unsupported: 'diff': %s" % str(err))
filelines, filepaths, dir_trees = ([] for i in range(3))
ignore_all_space = False
ignore_space_change = False
unified_diff = False
recursive_diff = False
strip_trailing_cr = False
for o, a in opts:
if o == "-w":
ignore_all_space = True
elif o == "-b":
ignore_space_change = True
elif o == "-u":
unified_diff = True
elif o == "-r":
recursive_diff = True
elif o == "--strip-trailing-cr":
strip_trailing_cr = True
else:
assert False, "unhandled option"
if len(args) != 2:
raise InternalShellError(cmd, "Error: missing or extra operand")
def getDirTree(path, basedir=""):
# Tree is a tuple of form (dirname, child_trees).
# An empty dir has child_trees = [], a file has child_trees = None.
child_trees = []
for dirname, child_dirs, files in os.walk(os.path.join(basedir, path)):
for child_dir in child_dirs:
child_trees.append(getDirTree(child_dir, dirname))
for filename in files:
child_trees.append((filename, None))
return path, sorted(child_trees)
def compareTwoFiles(filepaths):
compare_bytes = False
encoding = None
filelines = []
for file in filepaths:
try:
with open(file, 'r') as f:
filelines.append(f.readlines())
except UnicodeDecodeError:
try:
with io.open(file, 'r', encoding="utf-8") as f:
filelines.append(f.readlines())
encoding = "utf-8"
except:
compare_bytes = True
if compare_bytes:
return compareTwoBinaryFiles(filepaths)
else:
return compareTwoTextFiles(filepaths, encoding)
def compareTwoBinaryFiles(filepaths):
filelines = []
for file in filepaths:
with open(file, 'rb') as f:
filelines.append(f.readlines())
exitCode = 0
if hasattr(difflib, 'diff_bytes'):
# python 3.5 or newer
diffs = difflib.diff_bytes(difflib.unified_diff, filelines[0], filelines[1], filepaths[0].encode(), filepaths[1].encode())
diffs = [diff.decode() for diff in diffs]
else:
# python 2.7
func = difflib.unified_diff if unified_diff else difflib.context_diff
diffs = func(filelines[0], filelines[1], filepaths[0], filepaths[1])
for diff in diffs:
stdout.write(diff)
exitCode = 1
return exitCode
def compareTwoTextFiles(filepaths, encoding):
filelines = []
for file in filepaths:
if encoding is None:
with open(file, 'r') as f:
filelines.append(f.readlines())
else:
with io.open(file, 'r', encoding=encoding) as f:
filelines.append(f.readlines())
exitCode = 0
def compose2(f, g):
return lambda x: f(g(x))
f = lambda x: x
if strip_trailing_cr:
f = compose2(lambda line: line.rstrip('\r'), f)
if ignore_all_space or ignore_space_change:
ignoreSpace = lambda line, separator: separator.join(line.split())
ignoreAllSpaceOrSpaceChange = functools.partial(ignoreSpace, separator='' if ignore_all_space else ' ')
f = compose2(ignoreAllSpaceOrSpaceChange, f)
for idx, lines in enumerate(filelines):
filelines[idx]= [f(line) for line in lines]
func = difflib.unified_diff if unified_diff else difflib.context_diff
for diff in func(filelines[0], filelines[1], filepaths[0], filepaths[1]):
stdout.write(diff)
exitCode = 1
return exitCode
def printDirVsFile(dir_path, file_path):
if os.path.getsize(file_path):
msg = "File %s is a directory while file %s is a regular file"
else:
msg = "File %s is a directory while file %s is a regular empty file"
stdout.write(msg % (dir_path, file_path) + "\n")
def printFileVsDir(file_path, dir_path):
if os.path.getsize(file_path):
msg = "File %s is a regular file while file %s is a directory"
else:
msg = "File %s is a regular empty file while file %s is a directory"
stdout.write(msg % (file_path, dir_path) + "\n")
def printOnlyIn(basedir, path, name):
stdout.write("Only in %s: %s\n" % (os.path.join(basedir, path), name))
def compareDirTrees(dir_trees, base_paths=["", ""]):
# Dirnames of the trees are not checked, it's caller's responsibility,
# as top-level dirnames are always different. Base paths are important
# for doing os.walk, but we don't put it into tree's dirname in order
# to speed up string comparison below and while sorting in getDirTree.
left_tree, right_tree = dir_trees[0], dir_trees[1]
left_base, right_base = base_paths[0], base_paths[1]
# Compare two files or report file vs. directory mismatch.
if left_tree[1] is None and right_tree[1] is None:
return compareTwoFiles([os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0])])
if left_tree[1] is None and right_tree[1] is not None:
printFileVsDir(os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0]))
return 1
if left_tree[1] is not None and right_tree[1] is None:
printDirVsFile(os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0]))
return 1
# Compare two directories via recursive use of compareDirTrees.
exitCode = 0
left_names = [node[0] for node in left_tree[1]]
right_names = [node[0] for node in right_tree[1]]
l, r = 0, 0
while l < len(left_names) and r < len(right_names):
# Names are sorted in getDirTree, rely on that order.
if left_names[l] < right_names[r]:
exitCode = 1
printOnlyIn(left_base, left_tree[0], left_names[l])
l += 1
elif left_names[l] > right_names[r]:
exitCode = 1
printOnlyIn(right_base, right_tree[0], right_names[r])
r += 1
else:
exitCode |= compareDirTrees([left_tree[1][l], right_tree[1][r]],
[os.path.join(left_base, left_tree[0]),
os.path.join(right_base, right_tree[0])])
l += 1
r += 1
# At least one of the trees has ended. Report names from the other tree.
while l < len(left_names):
exitCode = 1
printOnlyIn(left_base, left_tree[0], left_names[l])
l += 1
while r < len(right_names):
exitCode = 1
printOnlyIn(right_base, right_tree[0], right_names[r])
r += 1
return exitCode
stderr = StringIO()
stdout = StringIO()
exitCode = 0
try:
for file in args:
if not os.path.isabs(file):
file = os.path.realpath(os.path.join(cmd_shenv.cwd, file))
if recursive_diff:
dir_trees.append(getDirTree(file))
else:
filepaths.append(file)
if not recursive_diff:
exitCode = compareTwoFiles(filepaths)
else:
exitCode = compareDirTrees(dir_trees)
except IOError as err:
stderr.write("Error: 'diff' command failed, %s\n" % str(err))
exitCode = 1
return ShellCommandResult(cmd, stdout.getvalue(), stderr.getvalue(), exitCode, False)
def executeBuiltinRm(cmd, cmd_shenv):
"""executeBuiltinRm - Removes (deletes) files or directories."""
args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
try:
opts, args = getopt.gnu_getopt(args, "frR", ["--recursive"])
except getopt.GetoptError as err:
raise InternalShellError(cmd, "Unsupported: 'rm': %s" % str(err))
force = False
recursive = False
for o, a in opts:
if o == "-f":
force = True
elif o in ("-r", "-R", "--recursive"):
recursive = True
else:
assert False, "unhandled option"
if len(args) == 0:
raise InternalShellError(cmd, "Error: 'rm' is missing an operand")
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and remove it.
os.chmod(path, stat.S_IMODE( os.stat(path).st_mode) | stat.S_IWRITE)
os.remove(path)
stderr = StringIO()
exitCode = 0
for path in args:
cwd = cmd_shenv.cwd
path = to_unicode(path) if kIsWindows else to_bytes(path)
cwd = to_unicode(cwd) if kIsWindows else to_bytes(cwd)
if not os.path.isabs(path):
path = os.path.realpath(os.path.join(cwd, path))
if force and not os.path.exists(path):
continue
try:
if os.path.isdir(path):
if not recursive:
stderr.write("Error: %s is a directory\n" % path)
exitCode = 1
if platform.system() == 'Windows':
# NOTE: use ctypes to access `SHFileOperationsW` on Windows to
# use the NT style path to get access to long file paths which
# cannot be removed otherwise.
from ctypes.wintypes import BOOL, HWND, LPCWSTR, UINT, WORD
from ctypes import addressof, byref, c_void_p, create_unicode_buffer
from ctypes import Structure
from ctypes import windll, WinError, POINTER
class SHFILEOPSTRUCTW(Structure):
_fields_ = [
('hWnd', HWND),
('wFunc', UINT),
('pFrom', LPCWSTR),
('pTo', LPCWSTR),
('fFlags', WORD),
('fAnyOperationsAborted', BOOL),
('hNameMappings', c_void_p),
('lpszProgressTitle', LPCWSTR),
]
FO_MOVE, FO_COPY, FO_DELETE, FO_RENAME = range(1, 5)
FOF_SILENT = 4
FOF_NOCONFIRMATION = 16
FOF_NOCONFIRMMKDIR = 512
FOF_NOERRORUI = 1024
FOF_NO_UI = FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR
SHFileOperationW = windll.shell32.SHFileOperationW
SHFileOperationW.argtypes = [POINTER(SHFILEOPSTRUCTW)]
path = os.path.abspath(path)
pFrom = create_unicode_buffer(path, len(path) + 2)
pFrom[len(path)] = pFrom[len(path) + 1] = '\0'
operation = SHFILEOPSTRUCTW(wFunc=UINT(FO_DELETE),
pFrom=LPCWSTR(addressof(pFrom)),
fFlags=FOF_NO_UI)
result = SHFileOperationW(byref(operation))
if result:
raise WinError(result)
else:
shutil.rmtree(path, onerror = on_rm_error if force else None)
else:
if force and not os.access(path, os.W_OK):
os.chmod(path,
stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
os.remove(path)
except OSError as err:
stderr.write("Error: 'rm' command failed, %s" % str(err))
exitCode = 1
return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
"""Return the standard fds for cmd after applying redirects
Returns the three standard file descriptors for the new child process. Each
fd may be an open, writable file object or a sentinel value from the
subprocess module.
"""
# Apply the redirections, we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
# from a file are represented with a list [file, mode, file-object]
# where file-object is initially None.
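# For example, the redirects of "foo > out.txt 2>&1" become
#   [(0,), ['out.txt', 'w', None], ['out.txt', 'w', None]]
# where indices 1 and 2 share one list object, so the file is opened only once.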
redirects = [(0,), (1,), (2,)]
for (op, filename) in cmd.redirects:
if op == ('>',2):
redirects[2] = [filename, 'w', None]
elif op == ('>>',2):
redirects[2] = [filename, 'a', None]
elif op == ('>&',2) and filename in '012':
redirects[2] = redirects[int(filename)]
elif op == ('>&',) or op == ('&>',):
redirects[1] = redirects[2] = [filename, 'w', None]
elif op == ('>',):
redirects[1] = [filename, 'w', None]
elif op == ('>>',):
redirects[1] = [filename, 'a', None]
elif op == ('<',):
redirects[0] = [filename, 'r', None]
else:
raise InternalShellError(cmd, "Unsupported redirect: %r" % ((op, filename),))
# Open file descriptors in a second pass.
std_fds = [None, None, None]
for (index, r) in enumerate(redirects):
# Handle the sentinel values for defaults up front.
if isinstance(r, tuple):
if r == (0,):
fd = stdin_source
elif r == (1,):
if index == 0:
raise InternalShellError(cmd, "Unsupported redirect for stdin")
elif index == 1:
fd = subprocess.PIPE
else:
fd = subprocess.STDOUT
elif r == (2,):
if index != 2:
raise InternalShellError(cmd, "Unsupported redirect on stdout")
fd = subprocess.PIPE
else:
raise InternalShellError(cmd, "Bad redirect")
std_fds[index] = fd
continue
(filename, mode, fd) = r
# Check if we already have an open fd. This can happen if stdout and
# stderr go to the same place.
if fd is not None:
std_fds[index] = fd
continue
redir_filename = None
name = expand_glob(filename, cmd_shenv.cwd)
if len(name) != 1:
raise InternalShellError(cmd, "Unsupported: glob in "
"redirect expanded to multiple files")
name = name[0]
if kAvoidDevNull and name == kDevNull:
fd = tempfile.TemporaryFile(mode=mode)
elif kIsWindows and name == '/dev/tty':
# Simulate /dev/tty on Windows.
# "CON" is a special filename for the console.
fd = open("CON", mode)
else:
# Make sure relative paths are relative to the cwd.
redir_filename = os.path.join(cmd_shenv.cwd, name)
redir_filename = to_unicode(redir_filename) \
if kIsWindows else to_bytes(redir_filename)
fd = open(redir_filename, mode)
# Workaround a Win32 and/or subprocess bug when appending.
#
# FIXME: Actually, this is probably an instance of PR6753.
if mode == 'a':
fd.seek(0, 2)
# Mutate the underlying redirect list so that we can redirect stdout
# and stderr to the same place without opening the file twice.
r[2] = fd
opened_files.append((filename, mode, fd) + (redir_filename,))
std_fds[index] = fd
return std_fds
def _executeShCmd(cmd, shenv, results, timeoutHelper):
if timeoutHelper.timeoutReached():
# Prevent further recursion if the timeout has been hit
# as we should try avoid launching more processes.
return None
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
if cmd.op == '&':
raise InternalShellError(cmd,"unsupported shell operator: '&'")
if cmd.op == '||':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res != 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
if cmd.op == '&&':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res is None:
return res
if res == 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
raise ValueError('Unknown shell command: %r' % cmd.op)
assert isinstance(cmd, ShUtil.Pipeline)
# Handle shell builtins first.
if cmd.commands[0].args[0] == 'cd':
if len(cmd.commands) != 1:
raise ValueError("'cd' cannot be part of a pipeline")
if len(cmd.commands[0].args) != 2:
raise ValueError("'cd' supports only one argument")
newdir = cmd.commands[0].args[1]
# Update the cwd in the parent environment.
if os.path.isabs(newdir):
shenv.cwd = newdir
else:
shenv.cwd = os.path.realpath(os.path.join(shenv.cwd, newdir))
# The cd builtin always succeeds. If the directory does not exist, the
# following Popen calls will fail instead.
return 0
# Handle "echo" as a builtin if it is not part of a pipeline. This greatly
# speeds up tests that construct input files by repeatedly echo-appending to
# a file.
# FIXME: Standardize on the builtin echo implementation. We can use a
# temporary file to sidestep blocking pipe write issues.
if cmd.commands[0].args[0] == 'echo' and len(cmd.commands) == 1:
output = executeBuiltinEcho(cmd.commands[0], shenv)
results.append(ShellCommandResult(cmd.commands[0], output, "", 0,
False))
return 0
if cmd.commands[0].args[0] == 'export':
if len(cmd.commands) != 1:
raise ValueError("'export' cannot be part of a pipeline")
if len(cmd.commands[0].args) != 2:
raise ValueError("'export' supports only one argument")
updateEnv(shenv, cmd.commands[0])
return 0
if cmd.commands[0].args[0] == 'mkdir':
if len(cmd.commands) != 1:
raise InternalShellError(cmd.commands[0], "Unsupported: 'mkdir' "
"cannot be part of a pipeline")
cmdResult = executeBuiltinMkdir(cmd.commands[0], shenv)
results.append(cmdResult)
return cmdResult.exitCode
if cmd.commands[0].args[0] == 'diff':
if len(cmd.commands) != 1:
raise InternalShellError(cmd.commands[0], "Unsupported: 'diff' "
"cannot be part of a pipeline")
cmdResult = executeBuiltinDiff(cmd.commands[0], shenv)
results.append(cmdResult)
return cmdResult.exitCode
if cmd.commands[0].args[0] == 'rm':
if len(cmd.commands) != 1:
raise InternalShellError(cmd.commands[0], "Unsupported: 'rm' "
"cannot be part of a pipeline")
cmdResult = executeBuiltinRm(cmd.commands[0], shenv)
results.append(cmdResult)
return cmdResult.exitCode
if cmd.commands[0].args[0] == ':':
if len(cmd.commands) != 1:
raise InternalShellError(cmd.commands[0], "Unsupported: ':' "
"cannot be part of a pipeline")
results.append(ShellCommandResult(cmd.commands[0], '', '', 0, False))
return 0;
procs = []
default_stdin = subprocess.PIPE
stderrTempFiles = []
opened_files = []
named_temp_files = []
builtin_commands = set(['cat'])
builtin_commands_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "builtin_commands")
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
for i,j in enumerate(cmd.commands):
# Reference the global environment by default.
cmd_shenv = shenv
if j.args[0] == 'env':
# Create a copy of the global environment and modify it for this one
# command. There might be multiple envs in a pipeline:
# env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
updateEnv(cmd_shenv, j)
stdin, stdout, stderr = processRedirects(j, default_stdin, cmd_shenv,
opened_files)
# If stderr wants to come from stdout, but stdout isn't a pipe, then put
# stderr on a pipe and treat it as stdout.
if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
stderr = subprocess.PIPE
stderrIsStdout = True
else:
stderrIsStdout = False
# Don't allow stderr on a PIPE except for the last
# process, this could deadlock.
#
# FIXME: This is slow, but so is deadlock.
if stderr == subprocess.PIPE and j != cmd.commands[-1]:
stderr = tempfile.TemporaryFile(mode='w+b')
stderrTempFiles.append((i, stderr))
# Resolve the executable path ourselves.
args = list(j.args)
executable = None
is_builtin_cmd = args[0] in builtin_commands;
if not is_builtin_cmd:
# For paths relative to cwd, use the cwd of the shell environment.
if args[0].startswith('.'):
exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
if os.path.isfile(exe_in_cwd):
executable = exe_in_cwd
if not executable:
executable = lit.util.which(args[0], cmd_shenv.env['PATH'])
if not executable:
raise InternalShellError(j, '%r: command not found' % j.args[0])
# Replace uses of /dev/null with temporary files.
if kAvoidDevNull:
# In Python 2.x, basestring is the base class for all string (including unicode)
# In Python 3.x, basestring no longer exist and str is always unicode
try:
str_type = basestring
except NameError:
str_type = str
for i,arg in enumerate(args):
if isinstance(arg, str_type) and kDevNull in arg:
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
named_temp_files.append(f.name)
args[i] = arg.replace(kDevNull, f.name)
# Expand all glob expressions
args = expand_glob_expressions(args, cmd_shenv.cwd)
if is_builtin_cmd:
args.insert(0, sys.executable)
args[1] = os.path.join(builtin_commands_dir ,args[1] + ".py")
# On Windows, do our own command line quoting for better compatibility
# with some core utility distributions.
if kIsWindows:
args = quote_windows_command(args)
try:
procs.append(subprocess.Popen(args, cwd=cmd_shenv.cwd,
executable = executable,
stdin = stdin,
stdout = stdout,
stderr = stderr,
env = cmd_shenv.env,
close_fds = kUseCloseFDs))
# Let the helper know about this process
timeoutHelper.addProcess(procs[-1])
except OSError as e:
raise InternalShellError(j, 'Could not create process ({}) due to {}'.format(executable, e))
# Immediately close stdin for any process taking stdin from us.
if stdin == subprocess.PIPE:
procs[-1].stdin.close()
procs[-1].stdin = None
# Update the current stdin source.
if stdout == subprocess.PIPE:
default_stdin = procs[-1].stdout
elif stderrIsStdout:
default_stdin = procs[-1].stderr
else:
default_stdin = subprocess.PIPE
# Explicitly close any redirected files. We need to do this now because we
# need to release any handles we may have on the temporary files (important
# on Win32, for example). Since we have already spawned the subprocess, our
# handles have already been transferred so we do not need them anymore.
for (name, mode, f, path) in opened_files:
f.close()
# FIXME: There is probably still deadlock potential here. Yawn.
procData = [None] * len(procs)
procData[-1] = procs[-1].communicate()
for i in range(len(procs) - 1):
if procs[i].stdout is not None:
out = procs[i].stdout.read()
else:
out = ''
if procs[i].stderr is not None:
err = procs[i].stderr.read()
else:
err = ''
procData[i] = (out,err)
# Read stderr out of the temp files.
for i,f in stderrTempFiles:
f.seek(0, 0)
procData[i] = (procData[i][0], f.read())
f.close()
exitCode = None
for i,(out,err) in enumerate(procData):
res = procs[i].wait()
# Detect Ctrl-C in subprocess.
if res == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
try:
if out is None:
out = ''
else:
out = to_string(out.decode('utf-8', errors='replace'))
except:
out = str(out)
try:
if err is None:
err = ''
else:
err = to_string(err.decode('utf-8', errors='replace'))
except:
err = str(err)
# Gather the redirected output files for failed commands.
output_files = []
if res != 0:
for (name, mode, f, path) in sorted(opened_files):
if path is not None and mode in ('w', 'a'):
try:
with open(path, 'rb') as f:
data = f.read()
except:
data = None
if data is not None:
output_files.append((name, path, data))
results.append(ShellCommandResult(
cmd.commands[i], out, err, res, timeoutHelper.timeoutReached(),
output_files))
if cmd.pipe_err:
# Take the last failing exit code from the pipeline.
if not exitCode or res != 0:
exitCode = res
else:
exitCode = res
# Remove any named temporary files we created.
for f in named_temp_files:
try:
os.remove(f)
except OSError:
pass
if cmd.negate:
exitCode = not exitCode
return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
cmds = []
for i, ln in enumerate(commands):
ln = commands[i] = re.sub(kPdbgRegex, ": '\\1'; ", ln)
try:
cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
test.config.pipefail).parse())
except:
return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
results = []
timeoutInfo = None
try:
shenv = ShellEnvironment(cwd, test.config.environment)
exitCode, timeoutInfo = executeShCmd(cmd, shenv, results, timeout=litConfig.maxIndividualTestTime)
except InternalShellError:
e = sys.exc_info()[1]
exitCode = 127
results.append(
ShellCommandResult(e.command, '', e.message, exitCode, False))
out = err = ''
for i,result in enumerate(results):
# Write the command line run.
out += '$ %s\n' % (' '.join('"%s"' % s
for s in result.command.args),)
# If nothing interesting happened, move on.
if litConfig.maxIndividualTestTime == 0 and \
result.exitCode == 0 and \
not result.stdout.strip() and not result.stderr.strip():
continue
# Otherwise, something failed or was printed, show it.
# Add the command output, if redirected.
for (name, path, data) in result.outputFiles:
if data.strip():
out += "# redirected output from %r:\n" % (name,)
data = to_string(data.decode('utf-8', errors='replace'))
if len(data) > 1024:
out += data[:1024] + "\n...\n"
out += "note: data was truncated\n"
else:
out += data
out += "\n"
if result.stdout.strip():
out += '# command output:\n%s\n' % (result.stdout,)
if result.stderr.strip():
out += '# command stderr:\n%s\n' % (result.stderr,)
if not result.stdout.strip() and not result.stderr.strip():
out += "note: command had no output on stdout or stderr\n"
# Show the error conditions:
if result.exitCode != 0:
# On Windows, a negative exit code indicates a signal, and those are
# easier to recognize or look up if we print them in hex.
if litConfig.isWindows and result.exitCode < 0:
codeStr = hex(int(result.exitCode & 0xFFFFFFFF)).rstrip("L")
else:
codeStr = str(result.exitCode)
out += "error: command failed with exit status: %s\n" % (
codeStr,)
if litConfig.maxIndividualTestTime > 0:
out += 'error: command reached timeout: %s\n' % (
str(result.timeoutReached),)
return out, err, exitCode, timeoutInfo
def executeScript(test, litConfig, tmpBase, commands, cwd):
bashPath = litConfig.getBashPath()
isWin32CMDEXE = (litConfig.isWindows and not bashPath)
script = tmpBase + '.script'
if isWin32CMDEXE:
script += '.bat'
# Write script file
mode = 'w'
open_kwargs = {}
if litConfig.isWindows and not isWin32CMDEXE:
mode += 'b' # Avoid CRLFs when writing bash scripts.
elif sys.version_info > (3,0):
open_kwargs['encoding'] = 'utf-8'
f = open(script, mode, **open_kwargs)
if isWin32CMDEXE:
for i, ln in enumerate(commands):
commands[i] = re.sub(kPdbgRegex, "echo '\\1' > nul && ", ln)
if litConfig.echo_all_commands:
f.write('@echo on\n')
else:
f.write('@echo off\n')
f.write('\n@if %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
for i, ln in enumerate(commands):
commands[i] = re.sub(kPdbgRegex, ": '\\1'; ", ln)
if test.config.pipefail:
f.write(b'set -o pipefail;' if mode == 'wb' else 'set -o pipefail;')
if litConfig.echo_all_commands:
f.write(b'set -x;' if mode == 'wb' else 'set -x;')
if sys.version_info > (3,0) and mode == 'wb':
f.write(bytes('{ ' + '; } &&\n{ '.join(commands) + '; }', 'utf-8'))
else:
f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
f.write(b'\n' if mode == 'wb' else '\n')
f.close()
if isWin32CMDEXE:
command = ['cmd','/c', script]
else:
if bashPath:
command = [bashPath, script]
else:
command = ['/bin/sh', script]
if litConfig.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on clang with no real loss.
command = litConfig.valgrindArgs + command
try:
out, err, exitCode = lit.util.executeCommand(command, cwd=cwd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
return (out, err, exitCode, None)
except lit.util.ExecuteCommandTimeoutException as e:
return (e.out, e.err, e.exitCode, e.msg)
def parseIntegratedTestScriptCommands(source_path, keywords):
"""
parseIntegratedTestScriptCommands(source_path) -> commands
Parse the commands in an integrated test script file into a list of
(line_number, command_type, line).
"""
# This code is carefully written to be dual compatible with Python 2.5+ and
# Python 3 without requiring input files to always have valid codings. The
# trick we use is to open the file in binary mode and use the regular
# expression library to find the commands, with it scanning strings in
# Python2 and bytes in Python3.
#
# Once we find a match, we do require each script line to be decodable to
# UTF-8, so we convert the outputs to UTF-8 before returning. This way the
# remaining code can work with "strings" agnostic of the executing Python
# version.
keywords_re = re.compile(
to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),)))
f = open(source_path, 'rb')
try:
# Read the entire file contents.
data = f.read()
# Ensure the data ends with a newline.
if not data.endswith(to_bytes('\n')):
data = data + to_bytes('\n')
# Iterate over the matches.
line_number = 1
last_match_position = 0
for match in keywords_re.finditer(data):
# Compute the updated line number by counting the intervening
# newlines.
match_position = match.start()
line_number += data.count(to_bytes('\n'), last_match_position,
match_position)
last_match_position = match_position
# Convert the keyword and line to UTF-8 strings and yield the
# command. Note that we take care to return regular strings in
# Python 2, to avoid other code having to differentiate between the
# str and unicode types.
#
# Opening the file in binary mode prevented Windows \r newline
# characters from being converted to Unix \n newlines, so manually
# strip those from the yielded lines.
keyword,ln = match.groups()
yield (line_number, to_string(keyword.decode('utf-8')),
to_string(ln.decode('utf-8').rstrip('\r')))
finally:
f.close()
def getTempPaths(test):
"""Get the temporary location, this is always relative to the test suite
root, not test source root."""
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpDir = os.path.join(execdir, 'Output')
tmpBase = os.path.join(tmpDir, execbase)
return tmpDir, tmpBase
def colonNormalizePath(path):
if kIsWindows:
return re.sub(r'^(.):', r'\1', path.replace('\\', '/'))
else:
assert path[0] == '/'
return path[1:]
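# For example, on Windows the path C:\Output\t.tmp normalizes to C/Output/t.tmp,
# while elsewhere /Output/t.tmp normalizes to Output/t.tmp.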
def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
sourcepath = test.getSourcePath()
sourcedir = os.path.dirname(sourcepath)
# Normalize slashes, if requested.
if normalize_slashes:
sourcepath = sourcepath.replace('\\', '/')
sourcedir = sourcedir.replace('\\', '/')
tmpDir = tmpDir.replace('\\', '/')
tmpBase = tmpBase.replace('\\', '/')
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = []
substitutions.extend([('%%', '#_MARKER_#')])
substitutions.extend(test.config.substitutions)
tmpName = tmpBase + '.tmp'
baseName = os.path.basename(tmpBase)
substitutions.extend([('%s', sourcepath),
('%S', sourcedir),
('%p', sourcedir),
('%{pathsep}', os.pathsep),
('%t', tmpName),
('%basename_t', baseName),
('%T', tmpDir),
('#_MARKER_#', '%')])
# "%/[STpst]" should be normalized.
substitutions.extend([
('%/s', sourcepath.replace('\\', '/')),
('%/S', sourcedir.replace('\\', '/')),
('%/p', sourcedir.replace('\\', '/')),
('%/t', tmpBase.replace('\\', '/') + '.tmp'),
('%/T', tmpDir.replace('\\', '/')),
])
# "%:[STpst]" are normalized paths without colons and without a leading
# slash.
substitutions.extend([
('%:s', colonNormalizePath(sourcepath)),
('%:S', colonNormalizePath(sourcedir)),
('%:p', colonNormalizePath(sourcedir)),
('%:t', colonNormalizePath(tmpBase + '.tmp')),
('%:T', colonNormalizePath(tmpDir)),
])
return substitutions
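# For example, with a (hypothetical) tmpBase of '/suite/Output/basic.ll', the
# substitution %t expands to '/suite/Output/basic.ll.tmp' and %basename_t to
# 'basic.ll'.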
def applySubstitutions(script, substitutions):
"""Apply substitutions to the script. Allow full regular expression syntax.
Replace each matching occurrence of regular expression pattern a with
substitution b in line ln."""
def processLine(ln):
# Apply substitutions
for a,b in substitutions:
if kIsWindows:
b = b.replace("\\","\\\\")
ln = re.sub(a, b, ln)
# Strip the trailing newline and any extra whitespace.
return ln.strip()
# Note Python 3 map() gives an iterator rather than a list so explicitly
# convert to list before returning.
return list(map(processLine, script))
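# For example, applying the (hypothetical) substitution list
#   [('%s', '/suite/basic.ll')]
# to the script line 'opt %s -S' yields 'opt /suite/basic.ll -S'.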
class ParserKind(object):
"""
An enumeration representing the style of an integrated test keyword or
command.
TAG: A keyword taking no value. Ex 'END.'
COMMAND: A keyword taking a list of shell commands. Ex 'RUN:'
LIST: A keyword taking a comma-separated list of values.
BOOLEAN_EXPR: A keyword taking a comma-separated list of
boolean expressions. Ex 'XFAIL:'
CUSTOM: A keyword with custom parsing semantics.
"""
TAG = 0
COMMAND = 1
LIST = 2
BOOLEAN_EXPR = 3
CUSTOM = 4
@staticmethod
def allowedKeywordSuffixes(value):
return { ParserKind.TAG: ['.'],
ParserKind.COMMAND: [':'],
ParserKind.LIST: [':'],
ParserKind.BOOLEAN_EXPR: [':'],
ParserKind.CUSTOM: [':', '.']
} [value]
@staticmethod
def str(value):
return { ParserKind.TAG: 'TAG',
ParserKind.COMMAND: 'COMMAND',
ParserKind.LIST: 'LIST',
ParserKind.BOOLEAN_EXPR: 'BOOLEAN_EXPR',
ParserKind.CUSTOM: 'CUSTOM'
} [value]
class IntegratedTestKeywordParser(object):
"""A parser for LLVM/Clang style integrated test scripts.
keyword: The keyword to parse for. It must end in either '.' or ':'.
kind: A value of ParserKind.
parser: A custom parser. This value may only be specified with
ParserKind.CUSTOM.
"""
def __init__(self, keyword, kind, parser=None, initial_value=None):
allowedSuffixes = ParserKind.allowedKeywordSuffixes(kind)
if len(keyword) == 0 or keyword[-1] not in allowedSuffixes:
if len(allowedSuffixes) == 1:
raise ValueError("Keyword '%s' of kind '%s' must end in '%s'"
% (keyword, ParserKind.str(kind),
allowedSuffixes[0]))
else:
raise ValueError("Keyword '%s' of kind '%s' must end in "
" one of '%s'"
% (keyword, ParserKind.str(kind),
' '.join(allowedSuffixes)))
if parser is not None and kind != ParserKind.CUSTOM:
raise ValueError("custom parsers can only be specified with "
"ParserKind.CUSTOM")
self.keyword = keyword
self.kind = kind
self.parsed_lines = []
self.value = initial_value
self.parser = parser
if kind == ParserKind.COMMAND:
self.parser = lambda line_number, line, output: \
self._handleCommand(line_number, line, output,
self.keyword)
elif kind == ParserKind.LIST:
self.parser = self._handleList
elif kind == ParserKind.BOOLEAN_EXPR:
self.parser = self._handleBooleanExpr
elif kind == ParserKind.TAG:
self.parser = self._handleTag
elif kind == ParserKind.CUSTOM:
if parser is None:
raise ValueError("ParserKind.CUSTOM requires a custom parser")
self.parser = parser
else:
raise ValueError("Unknown kind '%s'" % kind)
def parseLine(self, line_number, line):
try:
self.parsed_lines += [(line_number, line)]
self.value = self.parser(line_number, line, self.value)
except ValueError as e:
raise ValueError(str(e) + ("\nin %s directive on test line %d" %
(self.keyword, line_number)))
def getValue(self):
return self.value
@staticmethod
def _handleTag(line_number, line, output):
"""A helper for parsing TAG type keywords"""
return (not line.strip() or output)
@staticmethod
def _handleCommand(line_number, line, output, keyword):
"""A helper for parsing COMMAND type keywords"""
# Trim trailing whitespace.
line = line.rstrip()
# Substitute line number expressions
line = re.sub(r'%\(line\)', str(line_number), line)
def replace_line_number(match):
if match.group(1) == '+':
return str(line_number + int(match.group(2)))
if match.group(1) == '-':
return str(line_number - int(match.group(2)))
line = re.sub(r'%\(line *([\+-]) *(\d+)\)', replace_line_number, line)
# Collapse lines with trailing '\\'.
if output and output[-1][-1] == '\\':
output[-1] = output[-1][:-1] + line
else:
if output is None:
output = []
pdbg = "%dbg({keyword} at line {line_number})".format(
keyword=keyword,
line_number=line_number)
assert re.match(kPdbgRegex + "$", pdbg), \
"kPdbgRegex expected to match actual %dbg usage"
line = "{pdbg} {real_command}".format(
pdbg=pdbg,
real_command=line)
output.append(line)
return output
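# For example, a 'RUN:' line reading 'FileCheck %s < %t.out' found on test line 7
# is stored as '%dbg(RUN: at line 7) FileCheck %s < %t.out', and any '%(line+1)'
# in the original text would already have been rewritten to '8'.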
@staticmethod
def _handleList(line_number, line, output):
"""A parser for LIST type keywords"""
if output is None:
output = []
output.extend([s.strip() for s in line.split(',')])
return output
@staticmethod
def _handleBooleanExpr(line_number, line, output):
"""A parser for BOOLEAN_EXPR type keywords"""
parts = [s.strip() for s in line.split(',') if s.strip() != '']
if output and output[-1][-1] == '\\':
output[-1] = output[-1][:-1] + parts[0]
del parts[0]
if output is None:
output = []
output.extend(parts)
# Evaluate each expression to verify syntax.
# We don't want any results, just the raised ValueError.
for s in output:
if s != '*' and not s.endswith('\\'):
BooleanExpression.evaluate(s, [])
return output
@staticmethod
def _handleRequiresAny(line_number, line, output):
"""A custom parser to transform REQUIRES-ANY: into REQUIRES:"""
# Extract the conditions specified in REQUIRES-ANY: as written.
conditions = []
IntegratedTestKeywordParser._handleList(line_number, line, conditions)
# Output a `REQUIRES: a || b || c` expression in its place.
expression = ' || '.join(conditions)
IntegratedTestKeywordParser._handleBooleanExpr(line_number,
expression, output)
return output
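# For example, 'REQUIRES-ANY: x86, arm' is recorded as the single boolean
# expression 'x86 || arm', i.e. it behaves like 'REQUIRES: x86 || arm'.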
def parseIntegratedTestScript(test, additional_parsers=[],
require_script=True):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'REQUIRES'
and 'UNSUPPORTED' information.
If additional parsers are specified then the test is also scanned for the
keywords they specify and all matches are passed to the custom parser.
If 'require_script' is False an empty script
may be returned. This can be used for test formats where the actual script
is optional or ignored.
"""
# Install the built-in keyword parsers.
script = []
builtin_parsers = [
IntegratedTestKeywordParser('RUN:', ParserKind.COMMAND,
initial_value=script),
IntegratedTestKeywordParser('XFAIL:', ParserKind.BOOLEAN_EXPR,
initial_value=test.xfails),
IntegratedTestKeywordParser('REQUIRES:', ParserKind.BOOLEAN_EXPR,
initial_value=test.requires),
IntegratedTestKeywordParser('REQUIRES-ANY:', ParserKind.CUSTOM,
IntegratedTestKeywordParser._handleRequiresAny,
initial_value=test.requires),
IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.BOOLEAN_EXPR,
initial_value=test.unsupported),
IntegratedTestKeywordParser('END.', ParserKind.TAG)
]
keyword_parsers = {p.keyword: p for p in builtin_parsers}
# Install user-defined additional parsers.
for parser in additional_parsers:
if not isinstance(parser, IntegratedTestKeywordParser):
raise ValueError('additional parser must be an instance of '
'IntegratedTestKeywordParser')
if parser.keyword in keyword_parsers:
raise ValueError("Parser for keyword '%s' already exists"
% parser.keyword)
keyword_parsers[parser.keyword] = parser
# Collect the test lines from the script.
sourcepath = test.getSourcePath()
for line_number, command_type, ln in \
parseIntegratedTestScriptCommands(sourcepath,
keyword_parsers.keys()):
parser = keyword_parsers[command_type]
parser.parseLine(line_number, ln)
if command_type == 'END.' and parser.getValue() is True:
break
# Verify the script contains a run line.
if require_script and not script:
return lit.Test.Result(Test.UNRESOLVED, "Test has no run line!")
# Check for unterminated run lines.
if script and script[-1][-1] == '\\':
return lit.Test.Result(Test.UNRESOLVED,
"Test has unterminated run lines (with '\\')")
# Check boolean expressions for unterminated lines.
for key in keyword_parsers:
kp = keyword_parsers[key]
if kp.kind != ParserKind.BOOLEAN_EXPR:
continue
value = kp.getValue()
if value and value[-1][-1] == '\\':
raise ValueError("Test has unterminated %s lines (with '\\')" % key)
# Enforce REQUIRES:
missing_required_features = test.getMissingRequiredFeatures()
if missing_required_features:
msg = ', '.join(missing_required_features)
return lit.Test.Result(Test.UNSUPPORTED,
"Test requires the following unavailable "
"features: %s" % msg)
# Enforce UNSUPPORTED:
unsupported_features = test.getUnsupportedFeatures()
if unsupported_features:
msg = ', '.join(unsupported_features)
return lit.Test.Result(
Test.UNSUPPORTED,
"Test does not support the following features "
"and/or targets: %s" % msg)
# Enforce limit_to_features.
if not test.isWithinFeatureLimits():
msg = ', '.join(test.config.limit_to_features)
return lit.Test.Result(Test.UNSUPPORTED,
"Test does not require any of the features "
"specified in limit_to_features: %s" % msg)
return script
def _runShTest(test, litConfig, useExternalSh, script, tmpBase):
# Create the output directory if it does not already exist.
lit.util.mkdir_p(os.path.dirname(tmpBase))
execdir = os.path.dirname(test.getExecPath())
if useExternalSh:
res = executeScript(test, litConfig, tmpBase, script, execdir)
else:
res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
if isinstance(res, lit.Test.Result):
return res
out,err,exitCode,timeoutInfo = res
if exitCode == 0:
status = Test.PASS
else:
if timeoutInfo is None:
status = Test.FAIL
else:
status = Test.TIMEOUT
# Form the output log.
output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (
'\n'.join(script), exitCode)
if timeoutInfo is not None:
output += """Timeout: %s\n""" % (timeoutInfo,)
output += "\n"
# Append the outputs, if present.
if out:
output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
if err:
output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
return lit.Test.Result(status, output)
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[]):
if test.config.unsupported:
return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')
script = parseIntegratedTestScript(test)
if isinstance(script, lit.Test.Result):
return script
if litConfig.noExecute:
return lit.Test.Result(Test.PASS)
tmpDir, tmpBase = getTempPaths(test)
substitutions = list(extra_substitutions)
substitutions += getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
script = applySubstitutions(script, substitutions)
# Re-run failed tests up to test_retry_attempts times.
attempts = 1
if hasattr(test.config, 'test_retry_attempts'):
attempts += test.config.test_retry_attempts
for i in range(attempts):
res = _runShTest(test, litConfig, useExternalSh, script, tmpBase)
if res.code != Test.FAIL:
break
# If we had to run the test more than once, count it as a flaky pass. These
# will be printed separately in the test summary.
if i > 0 and res.code == Test.PASS:
res.code = Test.FLAKYPASS
return res
| apple/swift-llvm | utils/lit/lit/TestRunner.py | Python | apache-2.0 | 63,764 |
###### configuration start ########
job_target = 'thingiverse'
###### configuration finish ########
import sqlite3
import db_insert_jobs_base as myinsert
from datetime import datetime
import sys
import platform
client_id = platform.node()
print "## hello, ", client_id, job_target
def make_job(i):
thing_id = str(i)
job_id = 't_' + thing_id
job_url = 'http://www.thingiverse.com/thing:' + thing_id ##+'/' ## do not forget to add / at the end
job_file_path = 'web_jobs/%s/%s.html'%(job_target, job_id)
client_id = 'db_insert_row.py'
create_date = str(datetime.now())
update_date = str(datetime.now())
job_status = 1
http_status = -1
job = myinsert.make_job(job_id, job_url, job_file_path, client_id, create_date, update_date, job_status, http_status)
return job
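# For example, make_job(42) builds a job with job_id 't_42', job_url
# http://www.thingiverse.com/thing:42 and file path web_jobs/thingiverse/t_42.html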
###### custom function start, need to modify according to real case
def job_mongodb(option):
i = 0
t = 0
jobs = []
while i < 200000:
job = make_job(i)
#print job
if option == 'upsert_each':
myinsert.job_upsert(job, job_target)
elif option == 'insert_each':
myinsert.job_insert(job, job_target)
elif option == 'insert_bulk':
jobs.append(job)
i = i + 1
if not(option == 'insert_bulk'):
print i,
if i > t + 1000:
t = t + 1000
print t
if option == 'insert_bulk':
myinsert.job_insert(jobs, job_target);
jobs = []
if option == 'insert_bulk':
myinsert.job_insert(jobs, job_target);
print i
###### custom function end, need to modify according to real case
if __name__ == "__main__":
print "CMD: format: python db_insert_jobs_appid_to_asin.py [upsert_each | insert_each | insert_bulk]"
cmds = sys.argv
print len(cmds)
print cmds
if len(cmds) != 2:
print "ERROR: please follow the CMD format"
print "CMD: please try again"
sys.exit(1)
elif (cmds[1] != 'upsert_each' and cmds[1] != 'insert_each' and cmds[1] != 'insert_bulk'):
print "ERROR: please follow the CMD format", cmds
print "CMD: please try again"
sys.exit(1)
else :
print "start"
job_mongodb(cmds[1])
print 'done'
sys.exit(0)
| jianhuashao/WebDownloadJobsManage | server/db_insert_jobs_thingivers.py | Python | apache-2.0 | 2,023 |
from django.contrib import admin
from django.db import models
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.forms import CheckboxSelectMultiple
from .models import *
class FunkySaveAdmin(object):
'''
Redirects to the object on site when clicking the save button
'''
def response_add(self, request, obj, post_url_continue=None):
if '_save' in request.POST:
return redirect(obj.get_absolute_url())
else:
return super(FunkySaveAdmin, self).response_add(request, obj, post_url_continue)
def response_change(self, request, obj):
if '_save' in request.POST:
return redirect(obj.get_absolute_url())
else:
return super(FunkySaveAdmin, self).response_change(request, obj)
def add_view(self, request, form_url='', extra_context={}):
extra_context['show_save_and_return'] = True
return super().add_view(
request, form_url, extra_context=extra_context,
)
def change_view(self, request, object_id, form_url='', extra_context={}):
extra_context['show_save_and_return'] = True
return super().change_view(
request, object_id, form_url, extra_context=extra_context,
)
class InlineStepFileAdmin(admin.StackedInline):
model = StepFile
extra = 0
class InlineAssignmentAdmin(admin.StackedInline):
model = Assignment
extra = 0
class InlineDownloadAdmin(admin.StackedInline):
model = Download
extra = 0
class InlinePresentationAdmin(admin.StackedInline):
model = Presentation
extra = 0
class InlineRightAnswerAdmin(admin.StackedInline):
model = RightAnswer
extra = 0
class InlineWrongAnswerAdmin(admin.StackedInline):
model = WrongAnswer
extra = 0
class InlineClarificationAdmin(admin.StackedInline):
model = Clarification
extra = 0
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
pass
@admin.register(Course)
class CourseAdmin(FunkySaveAdmin, admin.ModelAdmin):
list_display = ['__str__', 'order', 'url']
prepopulated_fields = {'slug': ['name']}
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
@admin.register(Session)
class SessionAdmin(FunkySaveAdmin, admin.ModelAdmin):
def has_add_permission(self, request):
return False
ordering = ['course__order', 'number']
list_filter = ['course']
list_display = ['__str__', 'name', 'course', 'registration_enabled', 'active']
list_display_links = ['__str__']
inlines = [InlineDownloadAdmin, InlinePresentationAdmin]
exclude = ['course']
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
@admin.register(Assignment)
class AssignmentAdmin(FunkySaveAdmin, admin.ModelAdmin):
ordering = ['session__course__order', 'session__number', 'number']
list_display = ['__str__', 'session', 'nr_of_steps', 'active', 'locked']
list_filter = ['active', 'locked', 'session__course', 'session']
@admin.register(Step)
class StepAdmin(FunkySaveAdmin, admin.ModelAdmin):
def has_add_permission(self, request):
return False
ordering = ['assignment__session__course__order', 'assignment__session__number', 'assignment__number', 'number']
list_display = ['__str__', 'assignment', 'get_description', 'answer_required']
list_filter = ['assignment__session', 'assignment']
inlines = [InlineRightAnswerAdmin, InlineWrongAnswerAdmin, InlineClarificationAdmin, InlineStepFileAdmin]
exclude = ['assignment']
save_on_top = True
def get_description(self, obj):
return mark_safe(obj.description.raw.replace('\n', '<br>'))
| JaapJoris/bps | autodidact/admin.py | Python | agpl-3.0 | 3,733 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from keystone.common import dependency
from keystone.common import utils as ks_utils
from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone.i18n import _
from keystone.token import provider
from keystone.token.providers import common
from keystone.token.providers.fernet import token_formatters as tf
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('trust_api')
class Provider(common.BaseProvider):
def __init__(self, *args, **kwargs):
super(Provider, self).__init__(*args, **kwargs)
self.token_formatter = tf.TokenFormatter()
def needs_persistence(self):
"""Should the token be written to a backend."""
return False
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
"""Issue a V2 formatted token.
:param token_ref: reference describing the token
:param roles_ref: reference describing the roles for the token
:param catalog_ref: reference describing the token's catalog
:returns: tuple containing the ID of the token and the token data
"""
# TODO(lbragstad): Currently, Fernet tokens don't support bind in the
# token format. Raise a 501 if we're dealing with bind.
if token_ref.get('bind'):
raise exception.NotImplemented()
user_id = token_ref['user']['id']
# Default to password since methods not provided by token_ref
method_names = ['password']
project_id = None
# Verify that tenant is not None in token_ref
if token_ref.get('tenant'):
project_id = token_ref['tenant']['id']
parent_audit_id = token_ref.get('parent_audit_id')
# If parent_audit_id is defined then a token authentication was made
if parent_audit_id:
method_names.append('token')
audit_ids = provider.audit_info(parent_audit_id)
# Get v3 token data and exclude building v3 specific catalog. This is
# due to the fact that the V2TokenDataHelper.format_token() method
# doesn't build any of the token_reference from other Keystone APIs.
# Instead, it builds it from what is persisted in the token reference.
# Here we are going to leverage the V3TokenDataHelper.get_token_data()
# method written for V3 because it goes through and populates the token
# reference dynamically. Once we have a V3 token reference, we can
# attempt to convert it to a V2 token response.
v3_token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
project_id=project_id,
token=token_ref,
include_catalog=False,
audit_info=audit_ids)
expires_at = v3_token_data['token']['expires_at']
token_id = self.token_formatter.create_token(user_id, expires_at,
audit_ids,
methods=method_names,
project_id=project_id)
self._build_issued_at_info(token_id, v3_token_data)
# Convert v3 to v2 token data and build v2 catalog
token_data = self.v2_token_data_helper.v3_to_v2_token(token_id,
v3_token_data)
return token_id, token_data
def issue_v3_token(self, *args, **kwargs):
token_id, token_data = super(Provider, self).issue_v3_token(
*args, **kwargs)
self._build_issued_at_info(token_id, token_data)
return token_id, token_data
def _build_issued_at_info(self, token_id, token_data):
# NOTE(roxanaghe, lbragstad): We must use the creation time that
# Fernet builds into it's token. The Fernet spec details that the
# token creation time is built into the token, outside of the payload
# provided by Keystone. This is the reason why we don't pass the
# issued_at time in the payload. This also means that we shouldn't
# return a token reference with a creation time that we created
# when Fernet uses a different creation time. We should use the
# creation time provided by Fernet because it's the creation time
# that we have to rely on when we validate the token.
fernet_creation_datetime_obj = self.token_formatter.creation_time(
token_id)
token_data['token']['issued_at'] = ks_utils.isotime(
at=fernet_creation_datetime_obj, subsecond=True)
def _build_federated_info(self, token_data):
"""Extract everything needed for federated tokens.
This dictionary is passed to the FederatedPayload token formatter,
which unpacks the values and builds the Fernet token.
"""
group_ids = token_data['token'].get('user', {}).get(
federation_constants.FEDERATION, {}).get('groups')
idp_id = token_data['token'].get('user', {}).get(
federation_constants.FEDERATION, {}).get(
'identity_provider', {}).get('id')
protocol_id = token_data['token'].get('user', {}).get(
federation_constants.FEDERATION, {}).get('protocol', {}).get('id')
if not group_ids:
group_ids = list()
if group_ids:
federated_dict = dict(group_ids=group_ids, idp_id=idp_id,
protocol_id=protocol_id)
return federated_dict
return None
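    # Shape sketch (illustrative values only): a federated token yields
    # something like {'group_ids': [{'id': 'abc123'}], 'idp_id': 'acme',
    # 'protocol_id': 'saml2'}, which _rebuild_federated_info later expands
    # back into a token reference.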
def _rebuild_federated_info(self, federated_dict, user_id):
"""Format federated information into the token reference.
The federated_dict is passed back from the FederatedPayload token
formatter. The responsibility of this method is to format the
information passed back from the token formatter into the token
reference before constructing the token data from the
V3TokenDataHelper.
"""
g_ids = federated_dict['group_ids']
idp_id = federated_dict['idp_id']
protocol_id = federated_dict['protocol_id']
federated_info = dict(groups=g_ids,
identity_provider=dict(id=idp_id),
protocol=dict(id=protocol_id))
token_dict = {'user': {
federation_constants.FEDERATION: federated_info}}
token_dict['user']['id'] = user_id
token_dict['user']['name'] = user_id
return token_dict
def validate_v2_token(self, token_ref):
"""Validate a V2 formatted token.
:param token_ref: reference describing the token to validate
:returns: the token data
:raises keystone.exception.TokenNotFound: if token format is invalid
:raises keystone.exception.Unauthorized: if v3 token is used
"""
try:
(user_id, methods,
audit_ids, domain_id,
project_id, trust_id,
federated_info, created_at,
expires_at) = self.token_formatter.validate_token(token_ref)
except exception.ValidationError as e:
raise exception.TokenNotFound(e)
if trust_id or domain_id or federated_info:
msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
'domain, or federated tokens.')
raise exception.Unauthorized(msg)
v3_token_data = self.v3_token_data_helper.get_token_data(
user_id,
methods,
project_id=project_id,
expires=expires_at,
issued_at=created_at,
token=token_ref,
include_catalog=False,
audit_info=audit_ids)
return self.v2_token_data_helper.v3_to_v2_token(token_ref,
v3_token_data)
def validate_v3_token(self, token):
"""Validate a V3 formatted token.
:param token: a string describing the token to validate
:returns: the token data
:raises keystone.exception.TokenNotFound: if token format version isn't
supported
"""
try:
(user_id, methods, audit_ids, domain_id, project_id, trust_id,
federated_info, created_at, expires_at) = (
self.token_formatter.validate_token(token))
except exception.ValidationError as e:
raise exception.TokenNotFound(e)
token_dict = None
trust_ref = None
if federated_info:
token_dict = self._rebuild_federated_info(federated_info, user_id)
if trust_id:
trust_ref = self.trust_api.get_trust(trust_id)
return self.v3_token_data_helper.get_token_data(
user_id,
method_names=methods,
domain_id=domain_id,
project_id=project_id,
issued_at=created_at,
expires=expires_at,
trust=trust_ref,
token=token_dict,
audit_info=audit_ids)
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
        :returns: the token ID built by the token formatter
"""
return self.token_formatter.create_token(
token_data['token']['user']['id'],
token_data['token']['expires_at'],
token_data['token']['audit_ids'],
methods=token_data['token'].get('methods'),
domain_id=token_data['token'].get('domain', {}).get('id'),
project_id=token_data['token'].get('project', {}).get('id'),
trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
federated_info=self._build_federated_info(token_data)
)
@property
def _supports_bind_authentication(self):
"""Return if the token provider supports bind authentication methods.
:returns: False
"""
return False
| dstanek/keystone | keystone/token/providers/fernet/core.py | Python | apache-2.0 | 10,692 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
i = 30
def pri():
print i
i = 50
pri()
print i
| huangby/javaweb | pythonfile/test/build.py | Python | epl-1.0 | 111 |
import pyspark.sql.functions as func
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
import os
from datetime import datetime
from vina_utils import get_directory_pdb_analysis, get_ligand_from_receptor_ligand_model
from database_io import load_database
def save_log(finish_time, start_time):
log_file_name = 'mult_objective_selection.log'
current_path = os.getcwd()
path_file = os.path.join(current_path, log_file_name)
log_file = open(path_file, 'w')
diff_time = finish_time - start_time
msg = 'Starting ' + str(start_time) +'\n'
log_file.write(msg)
msg = 'Finishing ' + str(finish_time) +'\n'
log_file.write(msg)
msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\n'
log_file.write(msg)
def main():
config = configparser.ConfigParser()
config.read('config.ini')
#Number of poses to select by buried area
number_poses_to_select_mult_obj = int(config.get('DRUGDESIGN', 'number_poses_to_select_mult_obj') )
#Path that contains all files for analysis
path_analysis = config.get('DEFAULT', 'path_analysis')
#File for saving the filtered buried area
result_file_to_select_buried_area = config.get('DRUGDESIGN', 'result_file_to_select_buried_area')
#File for saving the filtered buried area only poses
result_file_to_select_buried_area_only_pose = config.get('DRUGDESIGN', 'result_file_to_select_buried_area_only_pose')
result_file_to_select_normalized_buried_area_only_pose = config.get('DRUGDESIGN', 'result_file_to_select_normalized_buried_area_only_pose')
#Ligand Database file
ligand_database = config.get('DEFAULT', 'ligand_database_path_file')
#Path where all pdb receptor are
path_receptor = config.get('DEFAULT', 'pdb_path')
#Path for saving pdb files of models generated by VS
path_ligand = get_directory_pdb_analysis(path_analysis)
#Path where saved the selected compelex
path_to_save = os.path.join(path_analysis, "mult_objective")
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
# Create SPARK config
maxResultSize = str(config.get('SPARK', 'maxResultSize'))
conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
# Create context
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
#Adding Python Source file
#Path for drugdesign project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
sc.addPyFile(os.path.join(path_spark_drugdesign,"vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"pdb_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"database_io.py"))
start_time = datetime.now()
finish_time = datetime.now()
save_log(finish_time, start_time)
if __name__ == '__main__':
	main() | rodrigofaccioli/drugdesign | virtualscreening/vina/spark/mult_objective_selection.py | Python | apache-2.0 | 2,716
import unittest
class MercuryInventoryControllerTest(unittest.TestCase):
"""Base class for mercury-inventory unit tests."""
def test_init(self):
assert True | jr0d/mercury | src/tests/inventory/unit/test_db_controller.py | Python | apache-2.0 | 174 |
#!/usr/bin/env python
import optparse
import os
def main():
p = optparse.OptionParser(description="Python 'ls' command clone",
prog="pyls",
version="0.1a",
usage="%prog [directory]")
p.add_option("--verbose", "-v", action="store_true",
help="Enables Verbose Output",default=False)
options, arguments = p.parse_args()
if len(arguments) == 1:
if options.verbose:
print("Verbose Mode Enabled")
path = arguments[0]
for filename in os.listdir(path):
if options.verbose:
print("Filename: %s " % filename)
else:
print(filename)
else:
p.print_help()
if __name__ == '__main__':
main()
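# Example invocations (any readable directory works):
#     python true_false.py /tmp
#     python true_false.py --verbose /tmp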
# verbose output is off by default | lluxury/P_U_S_A | 13_commandline/true_false.py | Python | mit | 830
# (C) 2015 Elke Schaper
"""
:synopsis: Input/output for sequences
.. moduleauthor:: Elke Schaper <[email protected]>
"""
from Bio import SeqIO
import logging
LOG = logging.getLogger(__name__)
# ########## READ SEQUENCE ###################################################
def read_fasta(file, indices=None):
""" Read all sequences from a fasta file.
Read all sequences from a fasta file.
At current, the Biopython SeqIO parser is used.
Args:
file (str): Path to input file
        indices ([int, int]): One-based index of the first sequence to return,
            and the index of the first sequence that is not returned.
"""
# Making a list out if the generator object might be overhead for huge
# fastafiles
count = 0
for seq_record in SeqIO.parse(file, "fasta"):
if indices:
count += 1
if count < indices[0]:
continue
elif count >= indices[1]:
break
yield str(seq_record.seq), seq_record.id
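# Usage sketch (hypothetical file name): print the IDs of records 1-3 of a
# FASTA file without loading the rest into memory.
#
#     for seq, seq_id in read_fasta("proteins.fasta", indices=[1, 4]):
#         print(seq_id, len(seq))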
# ########## WRITE SEQUENCE #################################################
def write(sequence, sequence_file, sequence_id="sequence_id_not_defined"):
""" Write a sequence str to fasta format in specified <sequence_file>
    Write a sequence str to fasta format in the specified <sequence_file>.
Args:
sequence (str): Sequence
sequence_file (str): Path to the output file
sequence_id (str): ID of the sequence in the output file.
"""
with open(sequence_file, 'a') as fastafile:
fastafile.write(">" + str(sequence_id) + '\n')
fastafile.write(str(sequence) + '\n')
| elkeschaper/tral | tral/sequence/sequence_io.py | Python | gpl-2.0 | 1,655 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import telemetry.timeline.bounds as timeline_bounds
from telemetry import decorators
# Enables the fast metric for this interaction
IS_FAST = 'is_fast'
# Enables the responsiveness metric for this interaction
IS_RESPONSIVE = 'is_responsive'
# Enables the smoothness metric for this interaction
IS_SMOOTH = 'is_smooth'
# Allows multiple duplicate interactions of the same type
REPEATABLE = 'repeatable'
METRICS = [
IS_FAST,
IS_RESPONSIVE,
IS_SMOOTH
]
FLAGS = METRICS + [REPEATABLE]
class ThreadTimeRangeOverlappedException(Exception):
"""Exception that can be thrown when computing overlapped thread time range
with other events.
"""
class NoThreadTimeDataException(ThreadTimeRangeOverlappedException):
"""Exception that can be thrown if there is not sufficient thread time data
to compute the overlapped thread time range."""
def IsTimelineInteractionRecord(event_name):
return event_name.startswith('Interaction.')
def _AssertFlagsAreValid(flags):
assert isinstance(flags, list)
for f in flags:
if f not in FLAGS:
raise AssertionError(
'Unrecognized flag for a timeline interaction record: %s' % f)
def GetJavaScriptMarker(label, flags):
"""Computes the marker string of an interaction record.
This marker string can be used with JavaScript API console.time()
and console.timeEnd() to mark the beginning and end of the
  interaction record.
Args:
label: The label used to identify the interaction record.
flags: the flags for the interaction record see FLAGS above.
Returns:
The interaction record marker string (e.g., Interaction.Label/flag1,flag2).
Raises:
AssertionError: If one or more of the flags is unrecognized.
"""
_AssertFlagsAreValid(flags)
return 'Interaction.%s/%s' % (label, ','.join(flags))
class TimelineInteractionRecord(object):
"""Represents an interaction that took place during a timeline recording.
As a page runs, typically a number of different (simulated) user interactions
take place. For instance, a user might click a button in a mail app causing a
popup to animate in. Then they might press another button that sends data to a
server and simultaneously closes the popup without an animation. These are two
interactions.
From the point of view of the page, each interaction might have a different
label: ClickComposeButton and SendEmail, for instance. From the point
of view of the benchmarking harness, the labels aren't so interesting as what
the performance expectations are for that interaction: was it loading
resources from the network? was there an animation?
Determining these things is hard to do, simply by observing the state given to
a page from javascript. There are hints, for instance if network requests are
sent, or if a CSS animation is pending. But this is by no means a complete
story.
Instead, we expect pages to mark up the timeline what they are doing, with
label and flags indicating the semantics of that interaction. This
is currently done by pushing markers into the console.time/timeEnd API: this
for instance can be issued in JS:
var str = 'Interaction.SendEmail/is_smooth,is_responsive,is_fast';
console.time(str);
setTimeout(function() {
console.timeEnd(str);
}, 1000);
When run with perf.measurements.timeline_based_measurement running, this will
then cause a TimelineInteractionRecord to be created for this range with
smoothness, responsive, and fast metrics reported for the marked up 1000ms
time-range.
The valid interaction flags are:
* is_fast: Enables the fast metric
* is_responsive: Enables the responsiveness metric
* is_smooth: Enables the smoothness metric
* repeatable: Allows other interactions to use the same label
"""
def __init__(self, label, start, end, async_event=None, flags=None):
assert label
self._label = label
self._start = start
self._end = end
self._async_event = async_event
self._flags = flags if flags is not None else []
_AssertFlagsAreValid(self._flags)
@property
def label(self):
return self._label
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def is_fast(self):
return IS_FAST in self._flags
@property
def is_responsive(self):
return IS_RESPONSIVE in self._flags
@property
def is_smooth(self):
return IS_SMOOTH in self._flags
@property
def repeatable(self):
return REPEATABLE in self._flags
# TODO(nednguyen): After crbug.com/367175 is marked fixed, we should be able
# to get rid of perf.measurements.smooth_gesture_util and make this the only
# constructor method for TimelineInteractionRecord.
@classmethod
def FromAsyncEvent(cls, async_event):
"""Construct an timeline_interaction_record from an async event.
Args:
async_event: An instance of
telemetry.timeline.async_slices.AsyncSlice
"""
assert async_event.start_thread == async_event.end_thread, (
'Start thread of this record\'s async event is not the same as its '
'end thread')
m = re.match('Interaction\.(?P<label>.+?)(/(?P<flags>[^/]+))?$',
async_event.name)
assert m, "Async event is not an interaction record."
label = m.group('label')
flags = m.group('flags').split(',') if m.group('flags') is not None else []
return cls(label, async_event.start, async_event.end, async_event, flags)
@decorators.Cache
def GetBounds(self):
bounds = timeline_bounds.Bounds()
bounds.AddValue(self.start)
bounds.AddValue(self.end)
return bounds
def HasMetric(self, metric_type):
if metric_type not in METRICS:
raise AssertionError('Unrecognized metric type for a timeline '
'interaction record: %s' % metric_type)
return metric_type in self._flags
def GetOverlappedThreadTimeForSlice(self, timeline_slice):
"""Get the thread duration of timeline_slice that overlaps with this record.
There are two cases :
Case 1: timeline_slice runs in the same thread as the record.
| [ timeline_slice ]
THREAD 1 | | |
| record starts record ends
(relative order in thread time)
As the thread timestamps in timeline_slice and record are consistent, we
simply use them to compute the overlap.
Case 2: timeline_slice runs in a different thread from the record's.
|
THREAD 2 | [ timeline_slice ]
|
|
THREAD 1 | | |
| record starts record ends
(relative order in wall-time)
Unlike case 1, thread timestamps of a thread are measured by its
thread-specific clock, which is inconsistent with that of the other
thread, and thus can't be used to compute the overlapped thread duration.
Hence, we use a heuristic to compute the overlap (see
_GetOverlappedThreadTimeForSliceInDifferentThread for more details)
Args:
timeline_slice: An instance of telemetry.timeline.slice.Slice
"""
if not self._async_event:
raise ThreadTimeRangeOverlappedException(
'This record was not constructed from async event')
if not self._async_event.has_thread_timestamps:
raise NoThreadTimeDataException(
'This record\'s async_event does not contain thread time data. '
'Event data: %s' % repr(self._async_event))
if not timeline_slice.has_thread_timestamps:
raise NoThreadTimeDataException(
'slice does not contain thread time data')
if timeline_slice.parent_thread == self._async_event.start_thread:
return self._GetOverlappedThreadTimeForSliceInSameThread(
timeline_slice)
else:
return self._GetOverlappedThreadTimeForSliceInDifferentThread(
timeline_slice)
def _GetOverlappedThreadTimeForSliceInSameThread(self, timeline_slice):
return timeline_bounds.Bounds.GetOverlap(
timeline_slice.thread_start, timeline_slice.thread_end,
self._async_event.thread_start, self._async_event.thread_end)
def _GetOverlappedThreadTimeForSliceInDifferentThread(self, timeline_slice):
# In case timeline_slice's parent thread is not the parent thread of the
# async slice that issues this record, we assume that events are descheduled
# uniformly. The overlap duration in thread time is then computed by
# multiplying the overlap wall-time duration of timeline_slice and the
# record's async slice with their thread_duration/duration ratios.
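    # Illustrative numbers (not taken from a real trace): a 10 ms wall-time
    # overlap, a slice that was on-CPU for 50% of its wall time and a record
    # whose async slice was on-CPU for 80% of its wall time give an estimate
    # of 10 * 0.5 * 0.8 = 4 ms of overlapped thread time.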
overlapped_walltime_duration = timeline_bounds.Bounds.GetOverlap(
timeline_slice.start, timeline_slice.end,
self.start, self.end)
if timeline_slice.duration == 0 or self._async_event.duration == 0:
return 0
timeline_slice_scheduled_ratio = (
timeline_slice.thread_duration / float(timeline_slice.duration))
record_scheduled_ratio = (
self._async_event.thread_duration / float(self._async_event.duration))
return (overlapped_walltime_duration * timeline_slice_scheduled_ratio *
record_scheduled_ratio)
def __repr__(self):
flags_str = ','.join(self._flags)
return ('TimelineInteractionRecord(label=\'%s\', start=%f, end=%f,' +
' flags=%s, async_event=%s)') % (
self.label,
self.start,
self.end,
flags_str,
repr(self._async_event))
| 7kbird/chrome | tools/telemetry/telemetry/web_perf/timeline_interaction_record.py | Python | bsd-3-clause | 9,910 |
# REFERENCE
# http://flask.pocoo.org/docs/0.10/quickstart/#a-minimal-application
# https://pythonhosted.org/Flask-SQLAlchemy/
# http://zetcode.com/db/postgresqlpythontutorial/
# http://blog.miguelgrinberg.com/post/designing-a-restful-api-with-python-and-flask
# http://blog.luisrei.com/articles/flaskrest.html
# DEPENDENCIES
# pip install -U psycopg2 flask flask-restful pyyaml
from utils import psycopg2,psycopg_connect_atlas
from models import Hasc
from wikiatlas import Gid
import json, utils
import time
import logging
from flask import Flask, jsonify, make_response, Response, request
from flask.ext.cors import CORS, cross_origin
from flask.ext.restful import Resource, Api
app = Flask(__name__)
api = Api(app)
# Enable cross domain requests
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type', 'X-Requested-With'
# API Index
@app.route('/', methods=['GET'])
def api_index():
return app.send_static_file('index.html')
@app.route('/v1/', methods=['GET'])
def api_v1():
return app.send_static_file('v1.html')
@app.route('/v1/index/', methods=['GET'])
def list_countries():
return utils.atlas2json("SELECT hasc, name, ST_Box2D(geom) FROM adm0_area;").replace("BOX(","").replace(")","").replace("st_box2d","bbox")
@app.route('/v1/index/<hasc>', methods=['GET'])
def list_subunits(hasc):
H = Hasc(hasc)
return H.subunits()
@app.route('/v1/bbox/<hasc>', methods=['GET'])
def generate_bbox(hasc):
H = Hasc(hasc)
return H.bbox()
@app.route('/v1/center/<hasc>', methods=['GET'])
def generate_centroid(hasc):
H = Hasc(hasc)
return H.center()
@app.route('/v1/near/<hasc>', methods=['GET'])
def find_nearby_areas(hasc):
H = Hasc(hasc)
return H.near()
@app.route('/v1/data/geojson/<hasc>', methods=['GET'])
def generate_geojson(hasc):
H = Hasc(hasc)
return Response(H.json("geojson"), mimetype='application/json')
#@app.route('/v1/data/<hasc_code>', methods=['GET'])
#def generate_topojson(hasc_code):
# H = Hasc(hasc_code)
# return H.json()
@app.route('/v1/data', methods=['POST'])
@cross_origin()
def data():
"""Data method"""
G = Gid(request.json)
return G.json()
# return L.json(request.json['layer'], request.json['topology'])
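# Example requests (hypothetical host; 'FI' is an illustrative HASC code):
#     curl http://localhost:5000/v1/bbox/FI
# returns the bounding box of that area, and
#     curl -X POST -H "Content-Type: application/json" -d '{...}' http://localhost:5000/v1/data
# returns the JSON payload that Gid builds from the posted body.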
# configure Flask logging
from logging import FileHandler
logger = FileHandler('error.log')
app.logger.setLevel(logging.INFO)
app.logger.addHandler(logger)
# log Flask events
app.logger.debug(u"Flask server started " + time.asctime())
@app.after_request
def write_access_log(response):
app.logger.debug(u"%s %s -> %s" % (time.asctime(), request.path, response.status_code))
return response
@app.errorhandler(500)
def internal_error(exception):
app.logger.exception(exception)
# 404 Error handler
@app.errorhandler(404)
def not_found(error):
app.logger.exception(error)
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(debug=False) | WikimapsAtlas/wikimapsatlas-server | api/api.py | Python | unlicense | 2,949 |
import sys
sys.path.insert(0, ".")
from coalib.misc.StringConverter import StringConverter
import unittest
class ProcessTest(unittest.TestCase):
def setUp(self):
self.uut = StringConverter("\n \\1 \n ")
def test_construction(self):
self.assertRaises(TypeError,
StringConverter,
"test",
strip_whitespaces=5)
self.assertRaises(TypeError,
StringConverter,
"test",
list_delimiters=5)
def test_whitespace_stripping(self):
self.assertEqual(str(self.uut), "1")
self.uut = StringConverter("\n 1 \n", strip_whitespaces=False)
self.assertEqual(str(self.uut), "\n 1 \n")
def test_int_conversion(self):
self.assertEqual(int(self.uut), 1)
self.uut = StringConverter(" not an int ")
self.assertRaises(ValueError, int, self.uut)
def test_float_conversion(self):
self.assertEqual(float(self.uut), 1)
self.uut.value = "0.5 "
self.assertEqual(float(self.uut), 0.5)
self.uut = StringConverter(" not a float ")
self.assertRaises(ValueError, float, self.uut)
def test_len(self):
self.assertEqual(len(self.uut), 1)
def test_iterator(self):
self.uut = StringConverter("a, test with!!some challenge",
list_delimiters=[",", " ", "!!"])
self.assertEqual(list(self.uut),
["a", "test", "with", "some", "challenge"])
self.uut = StringConverter("a\\ \\,\\\\ test with!!some challenge",
list_delimiters=[",", " ", "!!"])
self.assertEqual(list(self.uut),
["a ,\\", "test", "with", "some", "challenge"])
self.uut = StringConverter("a, test with!some \\\\\\ challenge\\ ",
list_delimiters=", !",
strip_whitespaces=False)
self.assertEqual(list(self.uut),
["a", "test", "with", "some", "\\ challenge "])
self.uut = StringConverter("a, test with!some \\\\\\ challenge\\ ",
list_delimiters=", !",
strip_whitespaces=True)
self.assertEqual(list(self.uut),
["a", "test", "with", "some", "\\ challenge"])
self.uut = StringConverter("testval", list_delimiters=[",", "¸"])
self.uut.value = "a\\n,bug¸g"
self.assertEqual(list(self.uut), ["an", "bug", "g"])
self.assertEqual(list(self.uut.__iter__(False)), ["a\\n", "bug", "g"])
self.assertTrue("bug" in self.uut)
self.assertFalse("but" in self.uut)
self.uut = StringConverter("a, test, \n",
list_delimiters=[","],
strip_whitespaces=True)
self.assertEqual(list(self.uut), ["a", "test"])
self.uut = StringConverter("a, test, \n",
list_delimiters=[","],
strip_whitespaces=False)
self.assertEqual(list(self.uut), ["a", " test", " \n"])
def test_dict_conversion(self):
self.uut = StringConverter("test")
self.assertEqual(dict(self.uut), {"test": ""})
self.uut = StringConverter("test, t")
self.assertEqual(dict(self.uut), {"test": "", "t": ""})
self.uut = StringConverter("test, t: v")
self.assertEqual(dict(self.uut), {"test": "", "t": "v"})
# Check escaping
self.uut = StringConverter("test, t\\: v")
self.assertEqual(dict(self.uut), {"test": "", "t: v": ""})
self.uut = StringConverter("test, t\\: v: t")
self.assertEqual(dict(self.uut), {"test": "", "t: v": "t"})
self.uut = StringConverter("test\\, t\\: v: t")
self.assertEqual(dict(self.uut), {"test, t: v": "t"})
self.uut = StringConverter("test\\, t\\: v: t\\,")
self.assertEqual(dict(self.uut), {"test, t: v": "t,"})
# Check that lists ignore colons
self.assertEqual(list(self.uut), ["test, t: v: t,"])
def test_bool_conversion(self):
self.assertEqual(bool(self.uut), True)
self.uut.value = "yeah"
self.assertEqual(bool(self.uut), True)
self.uut = StringConverter("y")
self.assertEqual(bool(self.uut), True)
self.uut = StringConverter("nope")
self.assertEqual(bool(self.uut), False)
self.uut = StringConverter(" i dont know ")
self.assertRaises(ValueError, bool, self.uut)
def test_equality_comparision(self):
self.assertEqual(StringConverter(" i dont know "),
StringConverter("i dont know"))
self.assertNotEqual(StringConverter(" dont know "),
StringConverter("i dont know "))
self.assertNotEqual(StringConverter(""),
StringConverter("i dont know "))
self.assertNotEqual(5, StringConverter("i dont know "))
if __name__ == '__main__':
unittest.main(verbosity=2)
| andreimacavei/coala | coalib/tests/misc/StringConverterTest.py | Python | agpl-3.0 | 5,178 |
import cPickle as pickle
import numpy as np
from scipy.io import loadmat
import random
def GetData(image_complete, data_type, train_mean, train_std):
"""Return simple array of pixels (shuffled)"""
if data_type == "train":
random.shuffle(image_complete)
else:
print "Shuffling training data"
# Create arrays to hold the shuffled data and labels
shuffled_data = []
shuffled_labels = []
for elem in image_complete:
shuffled_data.append((elem[0]).flatten())
shuffled_labels.append((elem[1][0]))
image_labels_vector = np.zeros((len(shuffled_labels), 10))
# Convert labels to size(10) vectors
for i in range(len(shuffled_labels)):
image_labels_vector[i][shuffled_labels[i]] = 1
shuffled_labels = image_labels_vector
shuffled_data = (shuffled_data-train_mean)/train_std # standardize according to training mean and sd
return shuffled_data, shuffled_labels, image_complete
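# Illustrative shapes only (assuming 28x28 digit images as in MNIST-style data):
# GetData returns an (N, 784) array of standardized pixel values, an (N, 10)
# one-hot label matrix, and the (possibly shuffled) zipped input.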
# Load raw data
test_file = "../dataset/test.mat"
train_file = "../dataset/train.mat"
train_data = loadmat(train_file)
test_data = loadmat(test_file)
test_features = np.array(test_data['test_images'])
test_features = np.rollaxis(test_features, 2, 0) # move the index axis to be the first
train_features = np.array(train_data['train_images'])
train_features = np.rollaxis(train_features, 2, 0) # move the index axis to be the first
train_labels = np.array(train_data['train_labels'])
# Shuffle and standardize data
train_mean, train_std = train_features.mean(), train_features.std()
train_features, train_labels, _ = GetData(zip(train_features, train_labels), "train",train_mean, train_std)
test_features, _, _ = GetData(zip(test_features, np.ones((len(test_features), 1))), "test",train_mean, train_std)
# Save standardized data and labels to disk
train_features.dump('train_features.np')
train_labels.dump('train_labels.pkl')
test_features.dump('test_features.np') | jvpoulos/cs289-hw6 | code-alt/prepare_data.py | Python | mit | 1,948 |
#!/usr/bin/env python
""" MultiQC submodule to parse output from Bamtools bam_stat.py
http://bamtools.sourceforge.net/#bam-stat-py """
from collections import OrderedDict
import logging
import re
from multiqc import config
from multiqc.plots import beeswarm
# Initialise the logger
log = logging.getLogger(__name__)
def parse_reports(self):
""" Find bamtools stats reports and parse their data """
# Set up vars
self.bamtools_stats_data = dict()
regexes = {
'total_reads': r"Total reads:\s*(\d+)",
'mapped_reads': r"Mapped reads:\s*(\d+)",
'mapped_reads_pct': r"Mapped reads:\s*\d+\s+\(([\d\.]+)%\)",
'forward_strand': r"Forward strand:\s*(\d+)",
'forward_strand_pct': r"Forward strand:\s*\d+\s+\(([\d\.]+)%\)",
'reverse_strand': r"Reverse strand:\s*(\d+)",
'reverse_strand_pct': r"Reverse strand:\s*\d+\s+\(([\d\.]+)%\)",
'failed_qc': r"Failed QC:\s*(\d+)",
'failed_qc_pct': r"Failed QC:\s*\d+\s+\(([\d\.]+)%\)",
'duplicates': r"Duplicates:\s*(\d+)",
'duplicates_pct': r"Duplicates:\s*\d+\s+\(([\d\.]+)%\)",
'paired_end': r"Paired-end reads:\s*(\d+)",
'paired_end_pct': r"Paired-end reads:\s*\d+\s+\(([\d\.]+)%\)",
'proper_pairs': r"'Proper-pairs'\s*(\d+)",
'proper_pairs_pct': r"'Proper-pairs'\s*\d+\s+\(([\d\.]+)%\)",
'both_mapped': r"Both pairs mapped:\s*(\d+)",
'both_mapped_pct': r"Both pairs mapped:\s*\d+\s+\(([\d\.]+)%\)",
'read_1': r"Read 1:\s*(\d+)",
'read_2': r"Read 2:\s*(\d+)",
'singletons': r"Singletons:\s*(\d+)",
'singletons_pct': r"Singletons:\s*\d+\s+\(([\d\.]+)%\)",
}
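    # Illustrative excerpt of the `bamtools stats` text these patterns are
    # written against (numbers made up; real output may differ slightly):
    #
    #     Total reads:       1000000
    #     Mapped reads:      950000    (95%)
    #     Forward strand:    475000    (47.5%)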
# Go through files and parse data using regexes
for f in self.find_log_files('bamtools/stats'):
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
d[k] = float(r_search.group(1))
if len(d) > 0:
if f['s_name'] in self.bamtools_stats_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='stats')
self.bamtools_stats_data[f['s_name']] = d
# Filter to strip out ignored sample names
self.bamtools_stats_data = self.ignore_samples(self.bamtools_stats_data)
if len(self.bamtools_stats_data) > 0:
# Write to file
self.write_data_file(self.bamtools_stats_data, 'multiqc_bamtools_stats')
# Add to general stats table
self.general_stats_headers['duplicates_pct'] = {
'title': '% Duplicates',
'description': '% Duplicate Reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'OrRd'
}
self.general_stats_headers['mapped_reads_pct'] = {
'title': '% Mapped',
'description': '% Mapped Reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'RdYlGn'
}
for s_name in self.bamtools_stats_data:
if s_name not in self.general_stats_data:
self.general_stats_data[s_name] = dict()
self.general_stats_data[s_name].update( self.bamtools_stats_data[s_name] )
# Make dot plot of counts
keys = OrderedDict()
defaults = {
'min': 0,
'max': 100,
'decimalPlaces': 2,
'suffix': '%'
}
num_defaults = {
'min': 0,
'modify': lambda x: float(x) / 1000000.0,
'decimalPlaces': 2
}
    keys['total_reads'] = dict(num_defaults, **{'title': 'Total reads', 'description': 'Total reads (millions)' })
keys['mapped_reads_pct'] = dict(defaults, **{'title': 'Mapped reads' })
keys['forward_strand_pct'] = dict(defaults, **{'title': 'Forward strand' })
keys['reverse_strand_pct'] = dict(defaults, **{'title': 'Reverse strand' })
keys['failed_qc_pct'] = dict(defaults, **{'title': 'Failed QC' })
keys['duplicates_pct'] = dict(defaults, **{'title': 'Duplicates' })
keys['paired_end_pct'] = dict(defaults, **{'title': 'Paired-end', 'description': 'Paired-end reads' })
keys['proper_pairs_pct'] = dict(defaults, **{'title': 'Proper-pairs' })
keys['both_mapped_pct'] = dict(defaults, **{'title': 'Both mapped', 'description': 'Both pairs mapped' })
    keys['read_1'] = dict(num_defaults, **{'title': 'Read 1', 'description': 'Read 1 (millions)' })
    keys['read_2'] = dict(num_defaults, **{'title': 'Read 2', 'description': 'Read 2 (millions)' })
keys['singletons_pct'] = dict(defaults, **{'title': 'Singletons' })
self.add_section (
name = 'Bamtools Stats',
anchor = 'bamtools-stats',
plot = beeswarm.plot(self.bamtools_stats_data, keys)
)
# Return number of samples found
return len(self.bamtools_stats_data)
| robinandeer/MultiQC | multiqc/modules/bamtools/stats.py | Python | gpl-3.0 | 5,028 |
# -*- coding: utf-8 -*-
#
# Cipher/PKCS1_OAEP.py : PKCS#1 OAEP
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA encryption protocol according to PKCS#1 OAEP
See RFC3447__ or the `original RSA Labs specification`__ .
This scheme is more properly called ``RSAES-OAEP``.
As an example, a sender may encrypt a message in this way:
>>> from Cryptodome.Cipher import PKCS1_OAEP
>>> from Cryptodome.PublicKey import RSA
>>>
>>> message = b'To be encrypted'
>>> key = RSA.importKey(open('pubkey.der').read())
>>> cipher = PKCS1_OAEP.new(key)
>>> ciphertext = cipher.encrypt(message)
At the receiver side, decryption can be done using the private part of
the RSA key:
>>> key = RSA.importKey(open('privkey.der').read())
    >>> cipher = PKCS1_OAEP.new(key)
>>> message = cipher.decrypt(ciphertext)
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125.
"""
__all__ = [ 'new', 'PKCS1OAEP_Cipher' ]
from Cryptodome.Signature.pss import MGF1
import Cryptodome.Hash.SHA1
from Cryptodome.Util.py3compat import *
import Cryptodome.Util.number
from Cryptodome.Util.number import ceil_div, bytes_to_long, long_to_bytes
from Cryptodome.Util.strxor import strxor
from Cryptodome import Random
class PKCS1OAEP_Cipher:
"""This cipher can perform PKCS#1 v1.5 OAEP encryption or decryption."""
def __init__(self, key, hashAlgo, mgfunc, label, randfunc):
"""Initialize this PKCS#1 OAEP cipher object.
:Parameters:
key : an RSA key object
If a private half is given, both encryption and decryption are possible.
If a public half is given, only encryption is possible.
hashAlgo : hash object
The hash function to use. This can be a module under `Cryptodome.Hash`
or an existing hash object created from any of such modules. If not specified,
`Cryptodome.Hash.SHA1` is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
         use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : byte string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
randfunc : callable
A function that returns random bytes.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
self._key = key
if hashAlgo:
self._hashObj = hashAlgo
else:
self._hashObj = Cryptodome.Hash.SHA1
if mgfunc:
self._mgf = mgfunc
else:
self._mgf = lambda x,y: MGF1(x,y,self._hashObj)
self._label = label
self._randfunc = randfunc
def can_encrypt(self):
"""Return True/1 if this cipher object can be used for encryption."""
return self._key.can_encrypt()
def can_decrypt(self):
"""Return True/1 if this cipher object can be used for decryption."""
return self._key.can_decrypt()
def encrypt(self, message):
"""Produce the PKCS#1 OAEP encryption of a message.
This function is named ``RSAES-OAEP-ENCRYPT``, and is specified in
section 7.1.1 of RFC3447.
:Parameters:
message : byte string
The message to encrypt, also known as plaintext. It can be of
variable length, but not longer than the RSA modulus (in bytes)
minus 2, minus twice the hash output size.
:Return: A byte string, the ciphertext in which the message is encrypted.
It is as long as the RSA modulus (in bytes).
:Raise ValueError:
If the RSA key length is not sufficiently long to deal with the given
message.
"""
# TODO: Verify the key is RSA
# See 7.1.1 in RFC3447
modBits = Cryptodome.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
mLen = len(message)
# Step 1b
ps_len = k-mLen-2*hLen-2
if ps_len<0:
raise ValueError("Plaintext is too long.")
# Step 2a
lHash = self._hashObj.new(self._label).digest()
# Step 2b
ps = bchr(0x00)*ps_len
# Step 2c
db = lHash + ps + bchr(0x01) + message
# Step 2d
ros = self._randfunc(hLen)
# Step 2e
dbMask = self._mgf(ros, k-hLen-1)
# Step 2f
maskedDB = strxor(db, dbMask)
# Step 2g
seedMask = self._mgf(maskedDB, hLen)
# Step 2h
maskedSeed = strxor(ros, seedMask)
# Step 2i
em = bchr(0x00) + maskedSeed + maskedDB
# Step 3a (OS2IP)
em_int = bytes_to_long(em)
# Step 3b (RSAEP)
m_int = self._key._encrypt(em_int)
# Step 3c (I2OSP)
c = long_to_bytes(m_int, k)
return c
def decrypt(self, ct):
"""Decrypt a PKCS#1 OAEP ciphertext.
This function is named ``RSAES-OAEP-DECRYPT``, and is specified in
section 7.1.2 of RFC3447.
:Parameters:
ct : byte string
The ciphertext that contains the message to recover.
:Return: A byte string, the original message.
:Raise ValueError:
If the ciphertext length is incorrect, or if the decryption does not
succeed.
:Raise TypeError:
If the RSA key has no private half.
"""
# See 7.1.2 in RFC3447
modBits = Cryptodome.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
hLen = self._hashObj.digest_size
# Step 1b and 1c
if len(ct) != k or k<hLen+2:
raise ValueError("Ciphertext with incorrect length.")
# Step 2a (O2SIP)
ct_int = bytes_to_long(ct)
# Step 2b (RSADP)
m_int = self._key._decrypt(ct_int)
# Complete step 2c (I2OSP)
em = long_to_bytes(m_int, k)
# Step 3a
lHash = self._hashObj.new(self._label).digest()
# Step 3b
y = em[0]
# y must be 0, but we MUST NOT check it here in order not to
# allow attacks like Manger's (http://dl.acm.org/citation.cfm?id=704143)
maskedSeed = em[1:hLen+1]
maskedDB = em[hLen+1:]
# Step 3c
seedMask = self._mgf(maskedDB, hLen)
# Step 3d
seed = strxor(maskedSeed, seedMask)
# Step 3e
dbMask = self._mgf(seed, k-hLen-1)
# Step 3f
db = strxor(maskedDB, dbMask)
# Step 3g
valid = 1
one = db[hLen:].find(bchr(0x01))
lHash1 = db[:hLen]
if lHash1!=lHash:
valid = 0
if one<0:
valid = 0
if bord(y)!=0:
valid = 0
if not valid:
raise ValueError("Incorrect decryption.")
# Step 4
return db[hLen+one+1:]
def new(key, hashAlgo=None, mgfunc=None, label=b(''), randfunc=None):
"""Return a cipher object `PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption.
:Parameters:
key : RSA key object
The key to use to encrypt or decrypt the message. This is a `Cryptodome.PublicKey.RSA` object.
Decryption is only possible if *key* is a private RSA key.
hashAlgo : hash object
The hash function to use. This can be a module under `Cryptodome.Hash`
or an existing hash object created from any of such modules. If not specified,
`Cryptodome.Hash.SHA1` is used.
mgfunc : callable
A mask generation function that accepts two parameters: a string to
        use as seed, and the length of the mask to generate, in bytes.
If not specified, the standard MGF1 is used (a safe choice).
label : byte string
A label to apply to this particular encryption. If not specified,
an empty string is used. Specifying a label does not improve
security.
randfunc : callable
A function that returns random bytes.
The default is `Random.get_random_bytes`.
:attention: Modify the mask generation function only if you know what you are doing.
Sender and receiver must use the same one.
"""
if randfunc is None:
randfunc = Random.get_random_bytes
return PKCS1OAEP_Cipher(key, hashAlgo, mgfunc, label, randfunc)
| chronicwaffle/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/Cipher/PKCS1_OAEP.py | Python | mit | 9,899 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing the two gram OPF model implementation. """
import collections
import itertools
from nupic import encoders
from nupic.data import field_meta
from nupic.frameworks.opf import model
from nupic.frameworks.opf import opf_utils
from opf_utils import InferenceType
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.two_gram_model_capnp import TwoGramModelProto
class TwoGramModel(model.Model):
"""
Two-gram benchmark model.
:param inferenceType: (:class:`nupic.frameworks.opf.opf_utils.InferenceType`)
  :param encoderParams: a dict of dicts, eventually sent to
:meth:`~nupic.encoders.multi.MultiEncoder.addMultipleEncoders` (see
docs of that method for param details).
"""
def __init__(self, inferenceType=InferenceType.TemporalNextStep,
encoderParams=()):
super(TwoGramModel, self).__init__(inferenceType)
self._logger = opf_utils.initLogger(self)
self._reset = False
self._hashToValueDict = dict()
self._learningEnabled = True
self._encoder = encoders.MultiEncoder(encoderParams)
self._fieldNames = self._encoder.getScalarNames()
self._prevValues = [None] * len(self._fieldNames)
self._twoGramDicts = [dict() for _ in xrange(len(self._fieldNames))]
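    # Illustrative state (made-up bucket indices): each entry of _twoGramDicts
    # maps a previous bucket index to a counter of the buckets that followed it,
    # e.g. {3: {5: 12, 7: 1}} after bucket 5 followed bucket 3 twelve times.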
def run(self, inputRecord):
results = super(TwoGramModel, self).run(inputRecord)
# Set up the lists of values, defaults, and encoded values.
values = [inputRecord[k] for k in self._fieldNames]
defaults = ['' if type(v) == str else 0 for v in values]
inputFieldEncodings = self._encoder.encodeEachField(inputRecord)
inputBuckets = self._encoder.getBucketIndices(inputRecord)
results.sensorInput = opf_utils.SensorInput(
dataRow=values, dataEncodings=inputFieldEncodings,
sequenceReset=int(self._reset))
# Keep track of the last value associated with each encoded value for that
# predictions can be returned in the original value format.
for value, bucket in itertools.izip(values, inputBuckets):
self._hashToValueDict[bucket] = value
# Update the two-gram dict if learning is enabled.
for bucket, prevValue, twoGramDict in itertools.izip(
inputBuckets, self._prevValues, self._twoGramDicts):
if self._learningEnabled and not self._reset:
if prevValue not in twoGramDict:
twoGramDict[prevValue] = collections.defaultdict(int)
twoGramDict[prevValue][bucket] += 1
# Populate the results.inferences dict with the predictions and encoded
# predictions.
predictions = []
encodedPredictions = []
for bucket, twoGramDict, default, fieldName in (
itertools.izip(inputBuckets, self._twoGramDicts, defaults,
self._fieldNames)):
if bucket in twoGramDict:
probabilities = twoGramDict[bucket].items()
prediction = self._hashToValueDict[
max(probabilities, key=lambda x: x[1])[0]]
predictions.append(prediction)
encodedPredictions.append(self._encoder.encodeField(fieldName,
prediction))
else:
predictions.append(default)
encodedPredictions.append(self._encoder.encodeField(fieldName,
default))
results.inferences = dict()
results.inferences[opf_utils.InferenceElement.prediction] = predictions
results.inferences[opf_utils.InferenceElement.encodings] = encodedPredictions
self._prevValues = inputBuckets
self._reset = False
return results
def finishLearning(self):
self._learningEnabled = False
def setFieldStatistics(self,fieldStats):
"""
Since the two-gram has no use for this information, this is a no-op
"""
pass
def getFieldInfo(self):
fieldTypes = self._encoder.getDecoderOutputFieldTypes()
assert len(self._fieldNames) == len(fieldTypes)
return tuple(field_meta.FieldMetaInfo(*args) for args in
itertools.izip(
self._fieldNames, fieldTypes,
itertools.repeat(field_meta.FieldMetaSpecial.none)))
def getRuntimeStats(self):
# TODO: Add debugging stats.
return dict()
def _getLogger(self):
return self._logger
def resetSequenceStates(self):
self._reset = True
@staticmethod
def getProtoType():
return TwoGramModelProto
@classmethod
def read(cls, proto):
"""
:param proto: capnp TwoGramModelProto message reader
"""
instance = object.__new__(cls)
super(TwoGramModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
instance._reset = proto.reset
instance._hashToValueDict = {x.hash: x.value
for x in proto.hashToValueDict}
instance._learningEnabled = proto.learningEnabled
instance._encoder = encoders.MultiEncoder.read(proto.encoder)
instance._fieldNames = instance._encoder.getScalarNames()
instance._prevValues = list(proto.prevValues)
instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))]
for idx, field in enumerate(proto.twoGramDicts):
for entry in field:
prev = None if entry.value == -1 else entry.value
instance._twoGramDicts[idx][prev] = collections.defaultdict(int)
for bucket in entry.buckets:
instance._twoGramDicts[idx][prev][bucket.index] = bucket.count
return instance
def write(self, proto):
"""
:param proto: capnp TwoGramModelProto message builder
"""
super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{"hash": h, "value": v}
for h, v in self._hashToValueDict.items()]
twoGramDicts = []
for items in self._twoGramDicts:
twoGramArr = []
for prev, values in items.iteritems():
buckets = [{"index": index, "count": count}
for index, count in values.iteritems()]
if prev is None:
prev = -1
twoGramArr.append({"value": prev, "buckets": buckets})
twoGramDicts.append(twoGramArr)
proto.twoGramDicts = twoGramDicts
def __getstate__(self):
# NOTE This deletion doesn't seem to make sense, as someone might want to
# serialize and then continue to use the model instance.
del self._logger
return self.__dict__
def __setstate__(self):
self._logger = opf_utils.initLogger(self)
| vitaly-krugl/nupic | src/nupic/frameworks/opf/two_gram_model.py | Python | agpl-3.0 | 7,631 |
#!/usr/bin/env python
#
#
# Thomas "Mr Men" Etcheverria
# <tetcheve (at) gmail .com>
#
# Created on : 10-12-2013 16:49:07
# Time-stamp: <17-12-2013 13:06:02>
#
# File name : /home/mrmen/calcul-litteral.py
# Description :
#
import random
import sympy
EXER = 5
QUESTION = 10
MONOME = 5
# preparation
variable = ["x", "y", "z", ""]
signe = ["+", "-"]
coeff = []
i=-9.5
while i<10:
coeff.append(i)
i+=1
i=-9
while i<10:
coeff.append(i)
i+=1
def texprint(poly):
polynome = ""
for monome in poly:
tempCoeff = monome[0]
tempVar = monome[1]
if (len(monome)==3):
tempSgn = monome[2]
if (tempCoeff==0):
polynome = polynome[:-2]
elif (tempCoeff==1):
if tempVar == "":
polynome = polynome + "1"
else:
polynome = polynome + str(tempVar)
else:
if (tempVar == ""):
polynome = polynome + str(tempCoeff)
else:
polynome = polynome + str(tempCoeff) + " MUL " + tempVar
if len(monome)==3:
polynome = polynome + tempSgn + " "
else:
polynome = polynome + " "
return polynome
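# Illustrative call (made-up monomials): texprint([["3", "x", "+"], ["(-2)", "y"]])
# returns roughly "3 MUL x+ (-2) MUL y"; the callers below replace the MUL
# placeholder with a space for display or with "*" before handing the string
# to sympy.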
def solevandtexprint(poly):
polynome = texprint(poly)
polynome = polynome.replace("MUL", "*")
x = sympy.var('x')
y = sympy.var('y')
z = sympy.var('z')
result = str(sympy.simplify(polynome))
return result.replace("*", " ")
# exercice
ListExer = []
for exer in range(EXER):
ListExer.append([])
for question in range(QUESTION):
polynome = []
for monome in range(MONOME):
tempVar = variable[random.randint(0,len(variable)-1)]
tempCoeff = coeff[random.randint(0,len(coeff)-1)]
tempSgn = signe[random.randint(0,1)]
if tempCoeff<0:
tempCoeff = "(" + str(tempCoeff) + ")"
L = []
if (monome!=MONOME-1):
L.append(tempCoeff)
L.append(tempVar)
L.append(tempSgn)
else:
L.append(tempCoeff)
L.append(tempVar)
polynome.append(L)
ListExer[exer].append(polynome)
print("\\documentclass{article}\n\\usepackage{amsmath,amssymb,amsthm}\n\\usepackage{enumerate}")
print("\\theoremstyle{definition}\\newtheorem{exercice}{Exercice}")
print("\\newtheorem{solution}{Solution}")
print("\\usepackage[margin=2cm]{geometry}")
print("\\usepackage{fancyhdr}")
print("\\fancyhf{}\\fancyhead[L]{Exercices de simplification}\\fancyhead[R]{Cinquieme}")
print("\\pagestyle{fancy}")
print("\\begin{document}")
for exer in range(EXER):
print("\\begin{exercice}\n\n\\ \\bigskip\n\n")
exo = ListExer[exer]
count = 1
print("\\begin{minipage}{0.5\\linewidth}")
print("\\begin{enumerate}[a)]")
for polynome in exo:
print("\\item $"+texprint(polynome).replace("MUL"," ")+"$\n")
        if (count==5):
            print("\\end{enumerate}")
            print("\\end{minipage}")
            print("\\begin{minipage}{0.5\\linewidth}")
            print("\\begin{enumerate}[a)]")
            print("\\setcounter{enumi}{5}")
        # increment on every question so the column break happens after item 5
        count+=1
print("\\end{enumerate}")
print("\\end{minipage}")
print("\\end{exercice}")
print("\n\\vspace*{0.5cm}\n")
print("\n\n\\newpage\n\n")
for exer in range(EXER):
print("\\begin{solution}\n\n\\ \\bigskip\n\n")
exo = ListExer[exer]
count = 1
print("\\begin{minipage}{0.5\\linewidth}")
print("\\begin{enumerate}[a)]")
for polynome in exo:
print("\\item $"+solevandtexprint(polynome)+"$\n")
        if (count==5):
            print("\\end{enumerate}")
            print("\\end{minipage}")
            print("\\begin{minipage}{0.5\\linewidth}")
            print("\\begin{enumerate}[a)]")
            print("\\setcounter{enumi}{5}")
        # increment on every solution so the column break happens after item 5
        count+=1
print("\\end{enumerate}")
print("\\end{minipage}")
print("\\end{solution}")
print("\n\\vspace*{0.5cm}\n")
print("\\end{document}")
| mrmen/scriptsMaths | temp/simplification.py | Python | gpl-2.0 | 4,052 |
# -*- coding: utf-8 -*-
"""
beeswarmplot.py is part of Coquery.
Copyright (c) 2016-2018 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import seaborn as sns
import matplotlib.pyplot as plt
from coquery.visualizer import barcodeplot
class BeeswarmPlot(barcodeplot.BarcodePlot):
axes_style = "whitegrid"
name = "Beeswarm plot"
icon = "Beeswarm_plot"
NUM_COLUMN = "coquery_invisible_corpus_id"
def prepare_arguments(self, data, x, y, z,
levels_x, levels_y):
if not x and not y:
if not self.force_horizontal:
X = [""] * len(data)
Y = data[self.NUM_COLUMN]
self.horizontal = True
else:
X = data[self.NUM_COLUMN]
Y = [""] * len(data)
self.horizontal = False
O = None
elif x:
X = data[x]
Y = data[self.NUM_COLUMN]
O = levels_x
self.horizontal = True
else:
X = data[self.NUM_COLUMN]
Y = data[y]
O = levels_y
self.horizontal = False
if self.z:
self.colorizer.set_reversed(True)
hue = self.colorizer.mpt_to_hex(
self.colorizer.get_hues(data[z]))
self.colorizer.set_reversed(False)
else:
hue = self.colorizer.mpt_to_hex(
self.colorizer.get_palette(n=len(data)))
self.colors = hue
return {"x": X, "y": Y, "order": O}
def set_titles(self):
if not self.x and not self.y:
if not self.force_horizontal:
self._xlab = ""
else:
self._ylab = ""
elif self.x:
self._xlab = self.x
else:
self._ylab = self.y
if not self.horizontal:
self._xlab = self.DEFAULT_LABEL
else:
self._ylab = self.DEFAULT_LABEL
def colorize_artists(self):
self.artists.set_color(self.colors)
def plot_facet(self, data, color, **kwargs):
self.args = self.prepare_arguments(data, self.x, self.y, self.z,
self.levels_x, self.levels_y)
ax = sns.swarmplot(**self.args)
self.artists = ax.collections[0]
provided_visualizations = [BeeswarmPlot]
| gkunter/coquery | coquery/visualizer/beeswarmplot.py | Python | gpl-3.0 | 2,584 |
from .archesmixin import ArchesMixin
from .base import BaseModelClass
from .rnamixin import RNASeqMixin
from .vaemixin import VAEMixin
__all__ = ["ArchesMixin", "BaseModelClass", "RNASeqMixin", "VAEMixin"]
| YosefLab/scVI | scvi/core/models/__init__.py | Python | bsd-3-clause | 207 |
import gc
from unittest import mock
import aioodbc
import pytest
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test___del__(loop, dsn, recwarn, executor):
conn = await aioodbc.connect(dsn=dsn, loop=loop, executor=executor)
exc_handler = mock.Mock()
loop.set_exception_handler(exc_handler)
del conn
gc.collect()
w = recwarn.pop()
assert issubclass(w.category, ResourceWarning)
msg = {'connection': mock.ANY, # conn was deleted
'message': 'Unclosed connection'}
if loop.get_debug():
msg['source_traceback'] = mock.ANY
exc_handler.assert_called_with(loop, msg)
assert not loop.is_closed()
| aio-libs/aioodbc | tests/test_slow.py | Python | apache-2.0 | 688 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['NoAR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_Seasonal_MonthOfYear_NoAR.py | Python | bsd-3-clause | 170 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<https://micronaet.com>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Export product list in Excel',
'version': '0.1',
'category': 'Report',
'description': '''
Wizard to export product list in Excel
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'excel_export',
],
'init_xml': [],
'demo': [],
'data': [
'wizard/excel_export_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| Micronaet/micronaet-campaign | product_export_excel/__openerp__.py | Python | agpl-3.0 | 1,632 |
# coding: utf-8
from vale.parser import ValeParser, get_by_name, annotate_form
from vale.codegen import ValeCodegen
# ... creates an instance of Vale parser
vale = ValeParser()
# ...
# ...
def test_linear_form_11():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "Function(x) :: f" + "\n"
stmts += "Real :: s" + "\n"
stmts += "b(v::V) := < s * f * v >_Omega" + "\n"
ast = vale.parse(stmts)
token = get_by_name(ast, "b")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_linear_form_12():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "Function(x) :: f" + "\n"
stmts += "b(v::V) := < f * dx(v) >_Omega" + "\n"
ast = vale.parse(stmts)
token = get_by_name(ast, "b")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_linear_form_13():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "Function(x) :: f" + "\n"
stmts += "b(v::V) := < f * dxx(v) >_Omega" + "\n"
ast = vale.parse(stmts)
token = get_by_name(ast, "b")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_linear_form_21():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "Function(x) :: f" + "\n"
stmts += "Function(x) :: g" + "\n"
stmts += "b1(u::V) := < f * u >_Omega" + "\n"
stmts += "b2(w::V) := < g * dx(w) >_Omega" + "\n"
stmts += "b((v1,v2)::V) := b1(v1) + b2(v2)"
ast = vale.parse(stmts)
token = get_by_name(ast, "b")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_linear_form_31():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "Function(x) :: f" + "\n"
stmts += "Function(x) :: g" + "\n"
stmts += "b1(u::V) := < f * u >_Omega" + "\n"
stmts += "b3(w::V) := < g * dx(w) >_Omega" + "\n"
stmts += "b((v1,v2,v3)::V) := b1(v1) + b3(v3)"
ast = vale.parse(stmts)
token = get_by_name(ast, "b")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_bilinear_form_11():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "a(v::V, u::V) := < dx(v) * dx(u) >_Omega"
ast = vale.parse(stmts)
token = get_by_name(ast, "a")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
# ...
def test_bilinear_form_21():
# ... parse the Vale code
stmts = "Domain(dim=1,kind='structured') :: Omega" + "\n"
stmts += "Space(domain=Omega,kind='h1') :: V" + "\n"
stmts += "a1(v::V, u::V) := < dx(v) * dx(u) >_Omega" + "\n"
stmts += "a2(v::V, u::V) := < v * u >_Omega" + "\n"
stmts += "a3(v::V, u::V) := < dx(v) * u >_Omega" + "\n"
stmts += "a((v1,v2)::V,(u1,u2)::V) := a1(v1,u1) + a2(v2,u2) + a3(v1,u2)"
ast = vale.parse(stmts)
token = get_by_name(ast, "a")
token = annotate_form(token, ast)
# ...
# ...
kernel = ValeCodegen(token)
print((kernel.doprint("LUA")))
# ...
# ...
######################################
if __name__ == "__main__":
# # ... code generation for linear forms
test_linear_form_11()
# test_linear_form_12()
# test_linear_form_13()
# test_linear_form_21()
# test_linear_form_31()
# # ...
# # ... code generation for bilinear forms
# test_bilinear_form_11()
# test_bilinear_form_21()
# # ...
| ratnania/vale | tests/test_codegen_1d.py | Python | mit | 4,709 |
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def hello_world():
hello_str = '''
Hello, I am Keith.
Who are you?
'''
return hello_str
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| KeithYue/FanancialAnalyse | hello.py | Python | gpl-2.0 | 248 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0004_auto_20151113_1457'),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[],
state_operations=[
migrations.RemoveField(
model_name='coursemode',
name='expiration_datetime',
),
migrations.AddField(
model_name='coursemode',
name='_expiration_datetime',
field=models.DateTimeField(db_column=b'expiration_datetime', default=None, blank=True, help_text='OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. Leave this blank if users can enroll in this mode until enrollment closes for the course.', null=True, verbose_name='Upgrade Deadline'),
),
]
)
]
| ESOedX/edx-platform | common/djangoapps/course_modes/migrations/0005_auto_20151217_0958.py | Python | agpl-3.0 | 1,085 |
import re
import copy
import logging
import datetime
import objectpath
from indra.statements import *
logger = logging.getLogger(__name__)
class EidosProcessor(object):
"""This processor extracts INDRA Statements from Eidos JSON-LD output.
Parameters
----------
json_dict : dict
A JSON dictionary containing the Eidos extractions in JSON-LD format.
Attributes
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements that were extracted by the processor.
"""
def __init__(self, json_dict, grounding_ns=None):
self.doc = EidosDocument(json_dict)
self.grounding_ns = grounding_ns
self.statements = []
def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
        # save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt)
def extract_correlations(self):
events = [e for e in self.doc.extractions if
'UndirectedRelation' in e['labels'] and
'Correlation' in e['labels']]
for event in events:
# For now, just take the first source and first destination.
# Later, might deal with hypergraph representation.
arg_ids = find_args(event, 'argument')
if len(arg_ids) != 2:
                logger.warning('Skipping correlation with not 2 arguments.')
                continue
# Resolve coreferences by ID
arg_ids = [self.doc.coreferences.get(arg_id, arg_id)
for arg_id in arg_ids]
# Get the actual entities
args = [self.doc.entities[arg_id] for arg_id in arg_ids]
# Make Events from the entities
members = [self.get_event(arg) for arg in args]
# Get the evidence
evidence = self.get_evidence(event)
st = Association(members, evidence=[evidence])
self.statements.append(st)
def extract_events(self):
events = [e for e in self.doc.extractions if
'Concept-Expanded' in e['labels']]
for event_entry in events:
event = self.get_event(event_entry)
evidence = self.get_evidence(event_entry)
event.evidence = [evidence]
if not event.context and evidence.context:
event.context = copy.deepcopy(evidence.context)
evidence.context = None
self.statements.append(event)
def get_event_by_id(self, event_id):
# Resolve coreferences by ID
event_id = self.doc.coreferences.get(event_id, event_id)
# Get the actual entity
event = self.doc.entities[event_id]
return self.get_event(event)
def get_event(self, event):
concept = self.get_concept(event)
states = event.get('states', [])
extracted_states = self.extract_entity_states(states)
polarity = extracted_states.get('polarity')
adjectives = extracted_states.get('adjectives')
delta = QualitativeDelta(polarity=polarity, adjectives=adjectives)
timex = extracted_states.get('time_context', None)
geo = extracted_states.get('geo_context', None)
context = WorldContext(time=timex, geo_location=geo) \
if timex or geo else None
stmt = Event(concept, delta=delta, context=context)
return stmt
def get_causal_relation(self, relation):
# For now, just take the first source and first destination.
# Later, might deal with hypergraph representation.
subj_id = find_arg(relation, 'source')
obj_id = find_arg(relation, 'destination')
if subj_id is None or obj_id is None:
return None
subj = self.get_event_by_id(subj_id)
obj = self.get_event_by_id(obj_id)
evidence = self.get_evidence(relation)
# We also put the adjectives and polarities into annotations since
# they could otherwise get squashed upon preassembly
evidence.annotations['subj_polarity'] = subj.delta.polarity
evidence.annotations['obj_polarity'] = obj.delta.polarity
evidence.annotations['subj_adjectives'] = subj.delta.adjectives
evidence.annotations['obj_adjectives'] = obj.delta.adjectives
evidence.annotations['subj_context'] = subj.context.to_json() if \
subj.context else {}
evidence.annotations['obj_context'] = obj.context.to_json() if \
obj.context else {}
st = Influence(subj, obj, evidence=[evidence])
return st
def get_evidence(self, relation):
"""Return the Evidence object for the INDRA Statment."""
provenance = relation.get('provenance')
# First try looking up the full sentence through provenance
text = None
context = None
if provenance:
sentence_tag = provenance[0].get('sentence')
if sentence_tag and '@id' in sentence_tag:
sentence_id = sentence_tag['@id']
sentence = self.doc.sentences.get(sentence_id)
if sentence is not None:
text = _sanitize(sentence['text'])
# Here we try to get the title of the document and set it
# in the provenance
doc_id = provenance[0].get('document', {}).get('@id')
if doc_id:
title = self.doc.documents.get(doc_id, {}).get('title')
if title:
provenance[0]['document']['title'] = title
annotations = {'found_by': relation.get('rule'),
'provenance': provenance}
if self.doc.dct is not None:
annotations['document_creation_time'] = self.doc.dct.to_json()
epistemics = {}
negations = self.get_negation(relation)
hedgings = self.get_hedging(relation)
if hedgings:
epistemics['hedgings'] = hedgings
if negations:
# This is the INDRA standard to show negation
epistemics['negated'] = True
# But we can also save the texts associated with the negation
# under annotations, just in case it's needed
annotations['negated_texts'] = negations
# If that fails, we can still get the text of the relation
if text is None:
text = _sanitize(relation.get('text'))
ev = Evidence(source_api='eidos', text=text, annotations=annotations,
context=context, epistemics=epistemics)
return ev
@staticmethod
def get_negation(event):
"""Return negation attached to an event.
Example: "states": [{"@type": "State", "type": "NEGATION",
"text": "n't"}]
"""
states = event.get('states', [])
if not states:
return []
negs = [state for state in states
if state.get('type') == 'NEGATION']
neg_texts = [neg['text'] for neg in negs]
return neg_texts
@staticmethod
def get_hedging(event):
"""Return hedging markers attached to an event.
Example: "states": [{"@type": "State", "type": "HEDGE",
"text": "could"}
"""
states = event.get('states', [])
if not states:
return []
hedgings = [state for state in states
if state.get('type') == 'HEDGE']
hedging_texts = [hedging['text'] for hedging in hedgings]
return hedging_texts
def extract_entity_states(self, states):
if states is None:
return {'polarity': None, 'adjectives': []}
polarity = None
adjectives = []
time_context = None
geo_context = None
for state in states:
if polarity is None:
if state['type'] == 'DEC':
polarity = -1
# Handle None entry here
mods = state.get('modifiers') if \
state.get('modifiers') else []
adjectives += [mod['text'] for mod in mods]
elif state['type'] == 'INC':
polarity = 1
mods = state.get('modifiers') if \
state.get('modifiers') else []
adjectives += [mod['text'] for mod in mods]
elif state['type'] == 'QUANT':
adjectives.append(state['text'])
if state['type'] == 'TIMEX':
time_context = self.time_context_from_ref(state)
elif state['type'] == 'LocationExp':
# TODO: here we take only the first geo_context occurrence.
# Eidos sometimes provides a list of locations, it may
# make sense to break those up into multiple statements
# each with one location
if not geo_context:
geo_context = self.geo_context_from_ref(state)
return {'polarity': polarity, 'adjectives': adjectives,
'time_context': time_context, 'geo_context': geo_context}
def get_groundings(self, entity):
"""Return groundings as db_refs for an entity."""
def get_grounding_entries(grounding):
if not grounding:
return None
entries = []
values = grounding.get('values', [])
# Values could still have been a None entry here
if values:
for entry in values:
ont_concept = entry.get('ontologyConcept')
value = entry.get('value')
if ont_concept is None or value is None:
continue
entries.append((ont_concept, value))
return entries
# Save raw text and Eidos scored groundings as db_refs
db_refs = {'TEXT': entity['text']}
groundings = entity.get('groundings')
if not groundings:
return db_refs
for g in groundings:
entries = get_grounding_entries(g)
# Only add these groundings if there are actual values listed
if entries:
key = g['name'].upper()
if self.grounding_ns is not None and \
key not in self.grounding_ns:
continue
if key == 'UN':
db_refs[key] = [(s[0].replace(' ', '_'), s[1])
for s in entries]
elif key == 'WM_FLATTENED' or key == 'WM':
db_refs['WM'] = [(s[0].strip('/'), s[1])
for s in entries]
else:
db_refs[key] = entries
return db_refs
def get_concept(self, entity):
"""Return Concept from an Eidos entity."""
# Use the canonical name as the name of the Concept
name = entity['canonicalName']
db_refs = self.get_groundings(entity)
concept = Concept(name, db_refs=db_refs)
return concept
def time_context_from_ref(self, timex):
"""Return a time context object given a timex reference entry."""
# If the timex has a value set, it means that it refers to a DCT or
# a TimeExpression e.g. "value": {"@id": "_:DCT_1"} and the parameters
# need to be taken from there
value = timex.get('value')
if value:
# Here we get the TimeContext directly from the stashed DCT
# dictionary
tc = self.doc.timexes.get(value['@id'])
return tc
return None
def geo_context_from_ref(self, ref):
"""Return a ref context object given a location reference entry."""
value = ref.get('value')
if value:
# Here we get the RefContext from the stashed geoloc dictionary
rc = self.doc.geolocs.get(value['@id'])
return rc
return None
def get_all_events(self):
"""Return a list of all standalone events from a list
of statements."""
events = []
for stmt in self.statements:
stmt = copy.deepcopy(stmt)
if isinstance(stmt, Influence):
for member in [stmt.subj, stmt.obj]:
member.evidence = stmt.evidence[:]
# Remove the context since it may be for the other member
for ev in member.evidence:
ev.context = None
events.append(member)
elif isinstance(stmt, Association):
for member in stmt.members:
member.evidence = stmt.evidence[:]
# Remove the context since it may be for the other member
for ev in member.evidence:
ev.context = None
events.append(member)
elif isinstance(stmt, Event):
events.append(stmt)
return events
class EidosDocument(object):
def __init__(self, json_dict):
self.tree = objectpath.Tree(json_dict)
self.extractions = []
self.sentences = {}
self.entities = {}
self.documents = {}
self.coreferences = {}
self.timexes = {}
self.geolocs = {}
self.dct = None
self._preprocess_extractions()
def _preprocess_extractions(self):
extractions = \
self.tree.execute("$.extractions[(@.@type is 'Extraction')]")
if not extractions:
return
# Listify for multiple reuse
self.extractions = list(extractions)
# Build a dictionary of entities
entities = [e for e in self.extractions if 'Concept' in
e.get('labels', [])]
self.entities = {entity['@id']: entity for entity in entities}
# Build a dictionary of sentences and document creation times (DCTs)
documents = self.tree.execute("$.documents[(@.@type is 'Document')]")
self.sentences = {}
for document in documents:
dct = document.get('dct')
title = document.get('title')
self.documents[document['@id']] = {'title': title}
# We stash the DCT here as a TimeContext object
if dct is not None:
self.dct = self.time_context_from_dct(dct)
self.timexes[dct['@id']] = self.dct
sentences = document.get('sentences', [])
for sent in sentences:
self.sentences[sent['@id']] = sent
timexes = sent.get('timexes')
if timexes:
for timex in timexes:
tc = time_context_from_timex(timex)
self.timexes[timex['@id']] = tc
geolocs = sent.get('geolocs')
if geolocs:
for geoloc in geolocs:
rc = ref_context_from_geoloc(geoloc)
self.geolocs[geoloc['@id']] = rc
# Build a dictionary of coreferences
for extraction in self.extractions:
if 'Coreference' in extraction['labels']:
reference = find_arg(extraction, 'reference')
anchor = find_arg(extraction, 'anchor')
self.coreferences[reference] = anchor
@staticmethod
def time_context_from_dct(dct):
"""Return a time context object given a DCT entry."""
time_text = dct.get('text')
start = _get_time_stamp(dct.get('start'))
end = _get_time_stamp(dct.get('end'))
duration = _get_duration(start, end)
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc
def _sanitize(text):
"""Return sanitized Eidos text field for human readability."""
d = {'-LRB-': '(', '-RRB-': ')'}
return re.sub('|'.join(d.keys()), lambda m: d[m.group(0)], text)
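# Illustrative example (input string made up, not from the original source):
# _sanitize() maps Penn-Treebank-style bracket tokens back to parentheses, e.g.
#     _sanitize("rainfall -LRB- mm -RRB-")  ->  "rainfall ( mm )"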
def _get_time_stamp(entry):
"""Return datetime object from a timex constraint start/end entry.
Example string format to convert: 2018-01-01T00:00
"""
if not entry or entry == 'Undef':
return None
try:
dt = datetime.datetime.strptime(entry, '%Y-%m-%dT%H:%M')
except Exception as e:
logger.debug('Could not parse %s format' % entry)
return None
return dt
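# Illustrative examples of the format documented above (made-up values):
#     _get_time_stamp("2018-01-01T00:00")  ->  datetime.datetime(2018, 1, 1, 0, 0)
#     _get_time_stamp("Undef")             ->  None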
def _get_duration(start, end):
if not start or not end:
return None
try:
duration = int((end - start).total_seconds())
except Exception as e:
logger.debug('Failed to get duration from %s and %s' %
(str(start), str(end)))
duration = None
return duration
def ref_context_from_geoloc(geoloc):
"""Return a RefContext object given a geoloc entry."""
text = geoloc.get('text')
geoid = geoloc.get('geoID')
rc = RefContext(name=text, db_refs={'GEOID': geoid})
return rc
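# Illustrative example (hypothetical geoloc entry): an input such as
#     {"text": "South Sudan", "geoID": "7909807"}
# yields RefContext(name="South Sudan", db_refs={"GEOID": "7909807"}).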
def time_context_from_timex(timex):
"""Return a TimeContext object given a timex entry."""
time_text = timex.get('text')
intervals = timex.get('intervals')
if not intervals:
start = end = duration = None
else:
constraint = intervals[0]
start = _get_time_stamp(constraint.get('start'))
end = _get_time_stamp(constraint.get('end'))
duration = _get_duration(start, end)
tc = TimeContext(text=time_text, start=start, end=end,
duration=duration)
return tc
def find_arg(event, arg_type):
"""Return ID of the first argument of a given type"""
obj_ids = find_args(event, arg_type)
if not obj_ids:
return None
else:
return obj_ids[0]
def find_args(event, arg_type):
"""Return IDs of all arguments of a given type"""
args = event.get('arguments', {})
obj_tags = [arg for arg in args if arg['type'] == arg_type]
if obj_tags:
return [o['value']['@id'] for o in obj_tags]
else:
return []
| johnbachman/belpy | indra/sources/eidos/processor.py | Python | mit | 18,394 |
from pyuploadcare.client import Uploadcare
from pyuploadcare.ucare_cli.commands.helpers import pprint
def register_arguments(subparsers):
subparser = subparsers.add_parser(
"list_webhooks", help="list all webhooks"
)
subparser.set_defaults(func=list_webhooks)
return subparser
def list_webhooks(arg_namespace, client: Uploadcare):
webhooks = [webhook.dict() for webhook in client.list_webhooks()]
pprint(webhooks)
| uploadcare/pyuploadcare | pyuploadcare/ucare_cli/commands/list_webhooks.py | Python | mit | 450 |
import re
from gwibber.microblog import network, util
import gnomekeyring
from oauth import oauth
from gwibber.microblog.util import log, resources
from gettext import lgettext as _
log.logger.name = "StatusNet"
PROTOCOL_INFO = {
"name": "StatusNet",
"version": 1.1,
"config": [
"private:secret_token",
"access_token",
"username",
"site_display_name",
"url_prefix",
"color",
"receive_enabled",
"send_enabled",
],
"authtype": "oauth1a",
"color": "#4E9A06",
"features": [
"send",
"receive",
"search",
"tag",
"reply",
"responses",
"private",
"public",
"delete",
"retweet",
"like",
"send_thread",
"send_private",
"user_messages",
"sinceid",
],
"default_streams": [
"receive",
"images",
"responses",
"private",
"public",
],
}
class Client:
def __init__(self, acct):
if acct.has_key("url_prefix"):
pref = "" if acct["url_prefix"].startswith("http") else "https://"
self.url_prefix = pref + acct["url_prefix"]
if acct.has_key("secret_token") and acct.has_key("password"): acct.pop("password")
if not acct.has_key("url_prefix") and acct.has_key("domain"): acct.pop("domain")
self.account = acct
def _common(self, data):
m = {}
try:
m["mid"] = str(data["id"])
m["service"] = "statusnet"
m["account"] = self.account["id"]
m["time"] = util.parsetime(data["created_at"])
m["source"] = data.get("source", False)
m["text"] = data["text"]
m["to_me"] = ("@%s" % self.account["username"]) in data["text"]
m["html"] = util.linkify(m["text"],
((util.PARSE_HASH, '#<a class="hash" href="%s#search?q=\\1">\\1</a>' % self.account["url_prefix"]),
(util.PARSE_NICK, '@<a class="nick" href="%s/\\1">\\1</a>' % self.account["url_prefix"])))
m["content"] = util.linkify(m["text"],
((util.PARSE_HASH, '#<a class="hash" href="gwibber:/tag?acct=%s&query=\\1">\\1</a>' % m["account"]),
(util.PARSE_NICK, '@<a class="nick" href="gwibber:/user?acct=%s&name=\\1">\\1</a>' % m["account"])))
images = []
if data.get("attachments", 0):
for a in data["attachments"]:
mime = a.get("mimetype", "")
if mime and mime.startswith("image") and a.get("url", 0):
images.append({"src": a["url"], "url": a["url"]})
images.extend(util.imgpreview(m["text"]))
if images:
m["images"] = images
m["type"] = "photo"
except:
log.logger.error("%s failure - %s", PROTOCOL_INFO["name"], data)
return m
def _message(self, data):
m = self._common(data)
if data.has_key("in_reply_to_status_id"):
if data["in_reply_to_status_id"]:
m["reply"] = {}
m["reply"]["id"] = data["in_reply_to_status_id"]
m["reply"]["nick"] = data["in_reply_to_screen_name"]
m["reply"]["url"] = "/".join((self.account["url_prefix"], "notice", str(m["reply"]["id"])))
user = data.get("user", data.get("sender", 0))
m["sender"] = {}
m["sender"]["name"] = user["name"]
m["sender"]["nick"] = user["screen_name"]
m["sender"]["id"] = user["id"]
m["sender"]["location"] = user["location"]
m["sender"]["followers"] = user["followers_count"]
m["sender"]["image"] = user["profile_image_url"]
m["sender"]["url"] = "/".join((self.account["url_prefix"], m["sender"]["nick"]))
m["sender"]["is_me"] = m["sender"]["nick"] == self.account["username"]
m["url"] = "/".join((self.account["url_prefix"], "notice", m["mid"]))
return m
def _private(self, data):
m = self._message(data)
m["private"] = True
m["recipient"] = {}
m["recipient"]["name"] = data["recipient"]["name"]
m["recipient"]["nick"] = data["recipient"]["screen_name"]
m["recipient"]["id"] = data["recipient"]["id"]
m["recipient"]["image"] = data["recipient"]["profile_image_url"]
m["recipient"]["location"] = data["recipient"]["location"]
m["recipient"]["url"] = "/".join((self.account["url_prefix"], m["recipient"]["nick"]))
m["recipient"]["is_me"] = m["recipient"]["nick"].lower() == self.account["username"].lower()
m["to_me"] = m["recipient"]["is_me"]
return m
def _result(self, data):
m = self._common(data)
if data["to_user_id"]:
m["reply"] = {}
m["reply"]["id"] = data["to_user_id"]
m["reply"]["nick"] = data["to_user"]
m["sender"] = {}
m["sender"]["nick"] = data["from_user"]
m["sender"]["id"] = data["from_user_id"]
m["sender"]["image"] = data["profile_image_url"]
m["sender"]["url"] = "/".join((self.account["url_prefix"], m["sender"]["nick"]))
m["url"] = "/".join((self.account["url_prefix"], "notice", str(m["mid"])))
return m
def _get(self, path, parse="message", post=False, single=False, **args):
if not self.account.has_key("access_token") and not self.account.has_key("secret_token"):
log.logger.error("%s unexpected result - %s", PROTOCOL_INFO["name"], _("Account needs to be re-authorized"))
return [{"error": {"type": "auth", "account": self.account, "message": _("Account needs to be re-authorized")}}]
url = "/".join((self.account["url_prefix"], "api", path))
self.sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.consumer = oauth.OAuthConsumer("anonymous", "anonymous")
self.token = oauth.OAuthToken(self.account["access_token"], self.account["secret_token"])
parameters = util.compact(args)
request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, self.token,
http_method=post and "POST" or "GET", http_url=url, parameters=parameters)
request.sign_request(self.sigmethod, self.consumer, self.token)
if post:
data = network.Download(request.to_url(), parameters, post).get_json()
else:
data = network.Download(request.to_url(), None, post).get_json()
resources.dump(self.account["service"], self.account["id"], data)
if isinstance(data, dict) and data.get("error", 0):
log.logger.error("%s failure - %s", PROTOCOL_INFO["name"], data["error"])
if "authenticate" in data["error"]:
return [{"error": {"type": "auth", "account": self.account, "message": data["error"]}}]
else:
return [{"error": {"type": "unknown", "account": self.account, "message": data["error"]}}]
elif isinstance(data, str):
log.logger.error("%s unexpected result - %s", PROTOCOL_INFO["name"], data)
return [{"error": {"type": "unknown", "account": self.account, "message": data}}]
if single: return [getattr(self, "_%s" % parse)(data)]
if parse: return [getattr(self, "_%s" % parse)(m) for m in data]
else: return []
return [self._result(m) for m in data]
def _search(self, **args):
data = network.Download("%s/api/search.json" % self.account["url_prefix"], util.compact(args))
data = data.get_json()
return [self._result(m) for m in data["results"]]
def __call__(self, opname, **args):
return getattr(self, opname)(**args)
def receive(self, count=util.COUNT, since=None):
return self._get("statuses/friends_timeline.json", count=count, since_id=since, source="Gwibber")
def user_messages(self, id=None, count=util.COUNT, since=None):
return self._get("statuses/user_timeline.json", id=id, count=count, since_id=since, source="Gwibber")
def responses(self, count=util.COUNT, since=None):
return self._get("statuses/mentions.json", count=count, since_id=since, source="Gwibber")
def private(self, count=util.COUNT, since=None):
private = self._get("direct_messages.json", "private", count=count, since_id=since, source="Gwibber") or []
private_sent = self._get("direct_messages/sent.json", "private", count=count, since_id=since, source="Gwibber") or []
return private + private_sent
def public(self, count=util.COUNT, since=None):
return self._get("statuses/public_timeline.json", source="Gwibber")
def search(self, query, count=util.COUNT, since=None):
return self._search(q=query, rpp=count, since_id=since, source="Gwibber")
def tag(self, query, count=util.COUNT, since=None):
return self._search(q="#%s" % query, count=count, since_id=since, source="Gwibber")
def delete(self, message):
self._get("statuses/destroy/%s.json" % message["mid"], None, post=True, do=1, source="Gwibber")
return []
def like(self, message):
self._get("favorites/create/%s.json" % message["mid"], None, post=True, do=1, source="Gwibber")
return []
def send(self, message):
return self._get("statuses/update.json", post=True, single=True, status=message, source="Gwibber")
def send_private(self, message, private):
return self._get("direct_messages/new.json", "private", post=True, single=True,
text=message, screen_name=private["sender"]["nick"], source="Gwibber")
def send_thread(self, message, target):
return self._get("statuses/update.json", post=True, single=True,
status=message, in_reply_to_status_id=target["mid"], source="Gwibber")
| rhg/Qwibber | gwibber/microblog/plugins/statusnet/__init__.py | Python | gpl-2.0 | 9,075 |
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Mihai Andrei <[email protected]>
"""
import unittest
from StringIO import StringIO
from tvb.adapters.uploaders.obj.parser import ObjWriter, ObjParser
class ObjFilesTest(unittest.TestCase):
def test_write_simple(self):
f = StringIO()
w = ObjWriter(f)
w.write([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 1, 2], [0, 1, 3]])
self.assertTrue(len(f.getvalue()) > 15)
def test_write_with_normals(self):
f = StringIO()
w = ObjWriter(f)
w.write([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 1, 2], [0, 1, 3]],
[[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
comment="exported from test")
self.assertTrue(len(f.getvalue()) > 15)
def test_write_parse_cycle(self):
f = StringIO()
w = ObjWriter(f)
vertices = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
normals = [(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1)]
triangles = [(0, 1, 2), (0, 1, 3)]
w.write(vertices, triangles, normals)
f.seek(0)
p = ObjParser()
p.read(f)
self.assertEqual(vertices, p.vertices)
self.assertEqual(normals, p.normals)
# self.assertEqual(triangles, p.faces)
def suite():
"""
Gather all the tests in a test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(ObjFilesTest))
return test_suite
if __name__ == "__main__":
#To run tests individually.
unittest.main()
| rajul/tvb-framework | tvb/tests/framework/adapters/uploaders/obj_file_test.py | Python | gpl-2.0 | 2,987 |
################################################################################
#
# This program is part of the ZenODBC Zenpack for Zenoss.
# Copyright (C) 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""interfaces
describes the form field to the user interface.
$Id: interfaces.py,v 1.0 2010/06/17 23:33:45 egor Exp $"""
__version__ = "$Revision: 1.0 $"[11:-2]
from Products.Zuul.interfaces import IInfo
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
class IODBCDataSourceInfo(IInfo):
name = schema.Text(title=_t(u'Name'))
enabled = schema.Bool(title=_t(u'Enabled'))
cs = schema.Text(title=_t(u'Connection String'))
sql = schema.TextLine(title=_t(u'SQL Query'))
| anksp21/Community-Zenpacks | ZenPacks.community.ZenODBC/ZenPacks/community/ZenODBC/interfaces.py | Python | gpl-2.0 | 947 |
#!/usr/bin/python
import pytricia
import reading_file_to_dict
import sys
import pprint
import csv
import p_trie
# Conflict-type labels passed to conflict_resolver() by the detection routines below.
redundancy = "redundancy"
correlation = "correlation"
shadowing = "shadowing"
overlap = "overlap"
imbrication = "imbrication"
def patricia(device_values):
pyt_src = pytricia.PyTricia()
pyt_dst = pytricia.PyTricia()
return pyt_src,pyt_dst
def check_tcp_udp(flow_rule):
if(flow_rule["nw_proto"]=="6"):
return True
else :
return False
def add_rule_to_patricia(pyt_src,pyt_dst,flow_rule):
src_ip=flow_rule["src_ip"]
dst_ip=flow_rule["dst_ip"]
aas=flow_rule["aasno"]
pyt_src.insert(src_ip,aas)
pyt_dst.insert(dst_ip,aas)
def add_rule_to_newft(flow_rule):
print >>f_new, flow_rule
def finding_patricia_empty(pyt):
if(len(pyt)==0):
return True
else :
return False
def detection_algorithm(r,gamma):
if(check_tcp_udp(r)==check_tcp_udp(gamma)):
add_rule_to_newft(r)
return
if(subset(pyt_src,pyt_dst,r,gamma)=="equal"): #do subset here
if(r["action "]==gamma["action "]):
conflict_resolver(gamma,r,redundancy)
print "Conflict is Redundancy : Sent to resolving"
else:
if(r["priority"]==gamma["priority"]):
conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
else:
print "Conflict is Generalization : Sent to resolving"
if(subset(pyt_src,pyt_dst,r,gamma)=="reverse"): #do subset here
if(r["action "]==gamma["action "]):
print "Conflict is Redundancy : Sent to resolving"
conflict_resolver(r,gamma,redundancy)
elif(r["priority"]==gamma["priority"]):
conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
else:
conflict_resolver(r,gamma,shadowing)
print "Conflict is Shadowing : Sent to resolving"
if(subset(pyt_src,pyt_dst,r,gamma)=="intersection"):
if(r["action "]==gamma["action "]):
print "Conflict is Overlap : Sent to resolving"
conflict_resolver(r,gamma,overlap)
else :
conflict_resolver(r,gamma,correlation)
print "Conflict is Correlation : Sent to resolving"
def detect_imbrication(r,device_values):
for gamma in device_values:
if(r["nw_proto"]==gamma["nw_proto"]):
if(subset(pyt_src,pyt_dst,r,gamma)=="intersection"):
print "Conflict is Imbrication : Sent to resolving"
conflict_resolver(r,gamma,imbrication)
def creating_dict():
# Calls the csv_dict_list function, passing the named csv
device_values = reading_file_to_dict.csv_dict_list(sys.argv[1])
# Prints the results nice and pretty like
#pprint.pprint(device_values)
return device_values
def conflict_resolver(r,gamma,conflict_type):
if(conflict_type==shadowing or conflict_type==redundancy):
add_rule_to_newft(r)
if(conflict_type==overlap):
print "Do union here" #union operation
if(conflict_type==imbrication):
a=input('Cross layer conflict. Choose one flow rule : ')
if(a==r):
add_rule_to_newft(r)
else :
add_rule_to_newft(gamma)
if __name__ == "__main__" :
device_values = creating_dict()
pyt_src,pyt_dst = patricia(device_values)
finding_patricia_empty(pyt_src)
r=device_values[0]
gamma=device_values[1]
f_new=open("new_flow_table","w+")
#print r["action "]
#add_rule_to_newft(r)
#add_rule_to_newft(gamma)
detection_algorithm(gamma,r)
#print r["nw_proto"]
#add_rule_to_patricia(pyt_src,pyt_dst,r)
#check_tcp_udp(r)
#finding_patricia_empty(pyt_src)
| VRaviTheja/SDN-policy | testing/testing_detection.py | Python | apache-2.0 | 3,341 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
import logging
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
LOG = logging.getLogger(__name__)
def get_tenant_choices(request):
tenant_choices = [('', _("Select a tenant"))]
tenants = []
try:
tenants, has_more = api.keystone.tenant_list(request)
except Exception:
msg = _('Projects could not be retrieved.')
exceptions.handle(request, msg)
for tenant in tenants:
if tenant.enabled:
tenant_choices.append((tenant.id, tenant.name))
return tenant_choices
class CreateNetworkProfile(forms.SelfHandlingForm):
"""Create Network Profile form."""
name = forms.CharField(max_length=255,
label=_("Name"),
required=True)
segment_type = forms.ChoiceField(label=_('Segment Type'),
choices=[('vlan', _('VLAN')),
('vxlan', _('VXLAN'))],
widget=forms.Select
(attrs={'class': 'switchable',
'data-slug': 'segtype'}))
segment_range = forms.CharField(max_length=255,
label=_("Segment Range"),
required=True,
help_text=_("1-4093 for VLAN"))
# TODO(absubram): Update help text for VXLAN segment range value.
multicast_ip_range = forms.CharField(max_length=30,
label=_("Multicast IP Range"),
required=False,
widget=forms.TextInput
(attrs={'class': 'switched',
'data-switch-on':
'segtype',
'data-segtype-vxlan':
_("Multicast IP Range")}))
physical_network = forms.CharField(max_length=255,
label=_("Physical Network"),
required=False,
widget=forms.TextInput
(attrs={'class': 'switched',
'data-switch-on': 'segtype',
'data-segtype-vlan':
_("Physical Network")}))
project_id = forms.ChoiceField(label=_("Project"),
required=False)
def __init__(self, request, *args, **kwargs):
super(CreateNetworkProfile, self).__init__(request, *args, **kwargs)
self.fields['project_id'].choices = get_tenant_choices(request)
def handle(self, request, data):
try:
LOG.debug('request = %(req)s, params = %(params)s',
{'req': request, 'params': data})
profile = api.neutron.profile_create(request,
name=data['name'],
segment_type=
data['segment_type'],
segment_range=
data['segment_range'],
physical_network=
data['physical_network'],
multicast_ip_range=
data['multicast_ip_range'],
tenant_id=data['project_id'])
msg = _('Network Profile %s '
'was successfully created.') % data['name']
LOG.debug(msg)
messages.success(request, msg)
return profile
except Exception:
redirect = reverse('horizon:router:nexus1000v:index')
msg = _('Failed to create network profile %s') % data['name']
LOG.error(msg)
exceptions.handle(request, msg, redirect=redirect)
class UpdateNetworkProfile(forms.SelfHandlingForm):
"""Update Network Profile form."""
profile_id = forms.CharField(label=_("ID"),
widget=forms.HiddenInput())
name = forms.CharField(max_length=255,
label=_("Name"), required=True)
segment_type = forms.ChoiceField(label=_('Segment Type'),
choices=[('vlan', 'VLAN'),
('vxlan', 'VXLAN')],
widget=forms.Select
(attrs={'class': 'switchable'}))
segment_range = forms.CharField(max_length=255,
label=_("Segment Range"),
required=True)
physical_network = forms.CharField(max_length=255,
label=_("Physical Network"),
required=False)
project_id = forms.CharField(label=_("Project"), required=False)
def handle(self, request, data):
try:
LOG.debug('request = %(req)s, params = %(params)s',
{'req': request, 'params': data})
profile = api.neutron.profile_update(request,
data['profile_id'],
name=data['name'],
segment_type=
data['segment_type'],
segment_range=
data['segment_range'],
physical_network=
data['physical_network'])
msg = _('Network Profile %s '
'was successfully updated.') % data['profile_id']
LOG.debug(msg)
messages.success(request, msg)
return profile
except Exception:
            msg = _('Failed to update network profile %s') % data['profile_id']
            LOG.error(msg)
            redirect = reverse('horizon:router:nexus1000v:index')
            exceptions.handle(request, msg, redirect=redirect)
| neudesk/neucloud | openstack_dashboard/dashboards/router/nexus1000v/forms.py | Python | apache-2.0 | 7,463 |
"""
Django ORM model specifications for the Course Structures sub-application
"""
import json
import logging
from collections import OrderedDict
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField, UsageKey
from util.models import CompressedTextField
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class CourseStructure(TimeStampedModel):
"""
The CourseStructure model is an aggregated representation of the course content tree
"""
class Meta(object):
app_label = 'course_structures'
course_id = CourseKeyField(max_length=255, db_index=True, unique=True, verbose_name='Course ID')
# Right now the only thing we do with the structure doc is store it and
# send it on request. If we need to store a more complex data model later,
# we can do so and build a migration. The only problem with a normalized
# data model for this is that it will likely involve hundreds of rows, and
# we'd have to be careful about caching.
structure_json = CompressedTextField(verbose_name='Structure JSON', blank=True, null=True)
# JSON mapping of discussion ids to usage keys for the corresponding discussion modules
discussion_id_map_json = CompressedTextField(verbose_name='Discussion ID Map JSON', blank=True, null=True)
@property
def structure(self):
"""
Deserializes a course structure JSON object
"""
if self.structure_json:
return json.loads(self.structure_json)
return None
@property
def ordered_blocks(self):
"""
Return the blocks in the order with which they're seen in the courseware. Parents are ordered before children.
"""
if self.structure:
ordered_blocks = OrderedDict()
self._traverse_tree(self.structure['root'], self.structure['blocks'], ordered_blocks)
return ordered_blocks
@property
def discussion_id_map(self):
"""
Return a mapping of discussion ids to usage keys of the corresponding discussion modules.
"""
if self.discussion_id_map_json:
result = json.loads(self.discussion_id_map_json)
for discussion_id in result:
# Usage key strings might not include the course run, so we add it back in with map_into_course
result[discussion_id] = UsageKey.from_string(result[discussion_id]).map_into_course(self.course_id)
return result
return None
def _traverse_tree(self, block, unordered_structure, ordered_blocks, parent=None):
"""
Traverses the tree and fills in the ordered_blocks OrderedDict with the blocks in
the order that they appear in the course.
"""
# find the dictionary entry for the current node
cur_block = unordered_structure[block]
if parent:
cur_block['parent'] = parent
ordered_blocks[block] = cur_block
for child_node in cur_block['children']:
self._traverse_tree(child_node, unordered_structure, ordered_blocks, parent=block)
| BehavioralInsightsTeam/edx-platform | openedx/core/djangoapps/content/course_structures/models.py | Python | agpl-3.0 | 3,148 |
# coding=utf-8
# Author: Dustyn Gibson <[email protected]>
# URL: http://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import validators
from requests.compat import urljoin
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickchill.helper.common import convert_size, try_int
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class KatProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "KickAssTorrents")
self.public = True
self.confirmed = True
self.minseed = None
self.minleech = None
self.url = "https://kat.cr"
self.urls = {"search": urljoin(self.url, "%s/")}
self.custom_url = None
self.cache = tvcache.TVCache(self, search_params={"RSS": ["tv", "anime"]})
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
search_params = {
"q": "",
"field": "seeders",
"sorder": "desc",
"rss": 1,
"category": ("tv", "anime")[anime]
}
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
search_params["q"] = search_string if mode != "RSS" else ""
search_params["field"] = "seeders" if mode != "RSS" else "time_add"
if mode != "RSS":
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_url = self.urls["search"] % ("usearch" if mode != "RSS" else search_string)
if self.custom_url:
if not validators.url(self.custom_url):
logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
return results
search_url = urljoin(self.custom_url, search_url.split(self.url)[1])
data = self.get_url(search_url, params=search_params, returns="text")
if not data:
logger.log("URL did not return results/data, if the results are on the site maybe try a custom url, or a different one", logger.DEBUG)
continue
if not data.startswith("<?xml"):
logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
continue
with BS4Parser(data, "html5lib") as html:
for item in html("item"):
try:
title = item.title.get_text(strip=True)
# Use the torcache link kat provides,
# unless it is not torcache or we are not using blackhole
# because we want to use magnets if connecting direct to client
# so that proxies work.
download_url = item.enclosure["url"]
if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers
if not (title and download_url):
continue
seeders = try_int(item.find("torrent:seeds").get_text(strip=True))
leechers = try_int(item.find("torrent:peers").get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
verified = bool(try_int(item.find("torrent:verified").get_text(strip=True)))
if self.confirmed and not verified:
if mode != "RSS":
logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
torrent_size = item.find("torrent:contentlength").get_text(strip=True)
size = convert_size(torrent_size) or -1
info_hash = item.find("torrent:infohash").get_text(strip=True)
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError):
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = KatProvider()
| dfalt974/SickRage | sickbeard/providers/kat.py | Python | gpl-3.0 | 6,455 |
from __future__ import unicode_literals
from memory.mem import _Memory
class Windows2003ServerR2Memory(_Memory):
def __init__(self, params):
super(Windows2003ServerR2Memory, self).__init__(params)
def csv_all_modules_dll(self):
super(Windows2003ServerR2Memory, self)._csv_all_modules_dll()
def csv_all_modules_opened_files(self):
super(Windows2003ServerR2Memory, self)._csv_all_modules_opened_files()
def json_all_modules_dll(self):
super(Windows2003ServerR2Memory, self)._json_all_modules_dll()
def json_all_modules_opened_files(self):
        super(Windows2003ServerR2Memory, self)._json_all_modules_opened_files()
| SekoiaLab/Fastir_Collector | memory/windows2003ServerR2Memory.py | Python | gpl-3.0 | 674 |
#!/usr/bin/env python
"""
@file edgeObj.py
@author Simon Box
@date 31/01/2013
Class for reading an edg.xml file and storing edgeObj data.
"""
from xml.dom.minidom import parse
class readEdges:
def __init__(self,filePath):
self.dom = parse(filePath)
#for node in dom.getElementsByTagName('edge'):
def getEdgeElementByName(self,name):
for node in self.dom.getElementsByTagName('edge'):
if name == node.getAttributeNode('id').nodeValue:
returnNode = node
return(returnNode)
def getDownstreamEdges(self,edgeName):
listOfEdges=[]
interestEdge = self.getEdgeElementByName(edgeName)
frm = interestEdge.getAttributeNode('from').nodeValue
to = interestEdge.getAttributeNode('to').nodeValue
for node in self.dom.getElementsByTagName('edge'):
if (to==node.getAttributeNode('from').nodeValue and frm!=node.getAttributeNode('to').nodeValue):
listOfEdges.append(node)
return(listOfEdges)
def getEdgeName(self,edge):
return(edge.getAttributeNode('id').nodeValue)
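# Minimal usage sketch (hypothetical file and edge names):
#     edges = readEdges("network.edg.xml")
#     for edge in edges.getDownstreamEdges("edge12"):
#         print(edges.getEdgeName(edge))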
| intelaligent/tctb | legacy_src/corridor/readEdges.py | Python | gpl-3.0 | 1,235 |
from xml.etree import ElementTree
RESULT_MAPPING = {
'failure': 'fail',
'error': 'error',
'skipped': 'skipped'
}
def get_ms(val):
return int(float(val) * 1000)
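# Illustrative example: JUnit-style reports give times in seconds, so
# get_ms("0.125") returns 125 (milliseconds, truncated to an int).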
class Parser(object):
def __init__(self, i):
self.input = i
self.tests = []
def parse(self, _badge_dir):
xml = ElementTree.parse(self.input)
root = xml.getroot()
return self.parse_root(root)
def parse_root(self, root):
if root.tag == 'testsuites':
for subroot in root:
self.parse_testsuite(subroot)
else:
self.parse_testsuite(root)
res = {
"version": 1,
"tests": self.tests
}
return res
def parse_testsuite(self, root):
assert root.tag == 'testsuite'
ts_name = root.attrib.get('name', 'None')
if not ts_name:
ts_name = 'None'
for el in root:
if el.tag != 'testcase':
continue
error = root.find('error')
if error is not None:
error = error.text
tc = self.parse_testcase(el, ts_name, error=error)
self.tests.append(tc)
def parse_testcase(self, el, ts_name, error=None):
time = el.attrib.get('time')
duration = 0
if time:
duration = get_ms(time)
suite = ts_name
tc = {
"measurements": [],
"name": el.attrib['name'],
"status": 'ok',
"suite": suite,
"duration": duration
}
message = el.attrib.get('message', '')
if message is None:
message = error
stack = None
if error:
stack = error
for e in el:
if e.tag in ('failure', 'error', 'skipped'):
if e.text:
if not stack:
stack = ''
stack += '\n'
stack += e.text
tc['status'] = RESULT_MAPPING[e.tag]
if message:
tc['message'] = message
if stack:
tc['stack'] = stack
return tc
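# Minimal usage sketch (hypothetical file name, not part of the original module):
#     parser = Parser("results.xml")
#     report = parser.parse(None)   # the badge-dir argument is unused by this parser
#     # report == {"version": 1, "tests": [...]}, one entry per <testcase>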
| InfraBox/infrabox | src/pyinfraboxutils/testresult.py | Python | mit | 2,210 |
# coding=utf-8
from __future__ import unicode_literals
import os
from importlib import import_module
def path_for_import(name):
"""
Returns the directory path for the given package or module.
"""
return os.path.dirname(os.path.abspath(import_module(name).__file__))
def import_dotted_path(path):
"""
Takes a dotted path to a member name in a module, and returns
the member after importing it.
"""
try:
module_path, member_name = path.rsplit(".", 1)
module = import_module(module_path)
return getattr(module, member_name)
except (ValueError, ImportError, AttributeError) as e:
raise ImportError("Could not import the name: %s: %s" % (path, e))
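# Illustrative example (standard-library path, shown only to demonstrate the call):
#     import_dotted_path("os.path.join") imports ``os.path`` and returns its
#     ``join`` function; a bad path raises ImportError with the reason attached.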
| hyperwood/Flask-Project | flask_project/importing.py | Python | mit | 719 |
import graphene
import thefederation.schema
class Query(thefederation.schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
| jaywink/diaspora-hub | config/schema.py | Python | agpl-3.0 | 158 |
#!/usr/bin/env python
"""
Card Explorer
Written by Colin Keigher
http://afreak.ca
All items in here are licensed under the LGPL (see LICENCE.TXT)
Release notes
====================================================================
X.X (Dec 15, 2012)
- pylint related fixes
- error checking on invalid stripes
0.4 (December 13, 2012)
- Checks against ISO/IEC 7812 upon initial scan.
- Improved the input mechanism so the data is sort of sanitised.
- Reading of financial cards works a bit better per ISO/IEC 7813.
- Added some items that will address rewards cards, but this has yet to be added to Main() function.
0.2 (December 6, 2012)
- Predictive input for a second track.
- Able to work with two types of financial cards. Some don't have more than two tracks.
- Moved service codes into the database.
- Added additional values to the IIN search in case items are missing.
- Rewrote the functions so it is a bit more organised.
- Made preparations for other types of card formats for a later version.
0.1 (December 5, 2012)
- Initial release. Includes IIN searching.
"""
import calendar
import sqlite3 as lite
"""
This is really the belly of the beast which will handle the scanning and then the determination of
what sort of card it might be.
"""
def Main():
# For now you can only scan financial cards that start with %B--this will change.
CardData = MainInput()
# Time to sort out the data here. What sort of card is this?
if CardData == "":
print "Invalid stripe: no data."
return -1
    if CardData[0][:1] == chr(37): # If we start with a % in the start sentinel, let's work with the data this way.
if CardData[0][1:2] == "B": # Let's do this if we think it's ISO compliant.
if "^" in CardData[0]:
if int(CardData[0][2:3]) in [3, 4, 5, 6]:
print FCCardOutput(CardData[0][2:].split("^")) # Presuming it is a financial card here.
else:
print CardData[0][2:].split("^")
print CardData[0][2:3]
else:
MainExitMsg("Not recognised yet!")
else:
MainExitMsg("Not recognised yet!")
    if CardData[:1] == ";": # Some cards use this as the sentinel.
if int(CardData[1:2]) in [4, 5, 6]:
if CardData.split("=")[1] == None:
MainExitMsg("Not recognised yet!")
else:
# This format I have not run across except with RBC cards. Any else? It lacks a name.
CardData = [ CardData.split("=")[0][1:], "No data", CardData.split("=")[1] ]
print FCCardOutput(CardData)
else:
MainExitMsg("Not recognised yet!")
print " "
def MainCardType(value):
return MainDBQuery("SELECT * FROM CardType WHERE ID=" + str(value))[1]
def MainInput():
print "Please swipe your card through the reader (hit enter to skip a track):"
InputStr = str(raw_input(">> "))
if InputStr[0:1] == chr(37): # Preparing for a two-track card scan.
InputStr = [ InputStr, str(raw_input(">> ")) ]
print " "
return InputStr
def MainExitMsg(msg):
print msg
quit()
def MainDisplayOutput(mtype, string):
print ""
print "Card type: ", MainCardType(mtype)
if mtype == 0:
OtherCard(string)
if mtype == 1:
FCCardOutput(string)
"""
Below are items specifically written for financial cards. This would include bank cards, credit cards,
and some forms of gift cards. The code below can be used for other types of cards too.
"""
# Generates a friendly card number in the format of XXXX XXXX XXXX XXXX and so forth.
# Will spit out a value if otherwise.
def FCFriendlyNumber(cnumber):
numlen = len(cnumber)
if numlen == 16: # This is meant for most cards (Visa, MC, Bank)
output = cnumber[0:4] + " " + cnumber[4:8] + " " + cnumber[8:12] + " " + cnumber[12:]
elif numlen == 15: # American Express has this format.
output = cnumber[0:4] + " " + cnumber[4:10] + " " + cnumber[10:]
elif numlen == 9: # Probably safe to display it as XXX XXX XXX
output = cnumber[0:3] + " " + cnumber[3:6] + " " + cnumber[6:]
else:
output = cnumber
return output
# Outputs a YYMM value from a card into a human-readable format.
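# For example, FCDateFormat("1512") returns "December 15" (YY=15, MM=12).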
def FCDateFormat(strdate):
output = calendar.month_name[int(strdate[2:])] + " " + strdate[0:2]
if int(strdate[2:]) > 12:
output = "Smarch? (fake value?)" # "Smarch" weather, while notoriously bad, does not exist!
return output
def FCCardOutput(strcard):
    # There are some odd cards out there that may not be financial cards and therefore will not have a service code.
if len(strcard[2][4:7]) < 3:
FCServiceCode = "Odd format. Are you sure it is a financial card?"
else:
FCServiceCode = str(strcard[2][4:7]) + " (" + FCServiceDecode(strcard[2][4:7]) + ")"
print "Card type: ", ISOCardType(strcard[0][:1])
print "Card number: ", FCFriendlyNumber(strcard[0])
print "Card holder: ", strcard[1]
print "Expiration date: ", FCDateFormat(strcard[2][0:4])
print "Service code: ", FCServiceCode
print "Issuer: ", FCINNSearch(strcard[0][:6])
# Returns a friendly value for the card's service codes.
def FCServiceDecode(code):
if int(code) == 000:
return "No information"
else:
return FCServiceDecodeReturn(1, int(code[0:1])) + ", " + \
FCServiceDecodeReturn(2, int(code[1:2])) + ", " + \
FCServiceDecodeReturn(3, int(code[2:3]))
# This will be a lot better once I move all of this into the SQLite DB
def FCServiceDecodeReturn(numtype, digit):
return MainDBQuery("SELECT * FROM ServiceCode WHERE ID=" + str(numtype) + str(digit))[1]
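# For example (illustration only): decoding service code "201" issues three
# look-ups with IDs 12, 20 and 31 -- the position number (1-3) prepended to
# the digit found at that position.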
# Makes a DB query using MainDBQuery() to pull an IIN.
# Starts off with a 6 digit search, then 4, then 2 all to provide an accurate result.
def FCINNSearch(string):
# We'll attempt to get something a bit more specific using 6-digits
try:
output = MainDBQuery("SELECT * FROM IINs WHERE IIN LIKE " + str(string))[1]
except:
# Should that fail, we'll attempt 4-digits
try:
output = MainDBQuery("SELECT * FROM IINs WHERE IIN LIKE " + str(string[0:4]))[1]
# We'll go generic or go broke! I'll add more as we go along.
except:
if int(string[0:2]) in [33, 34, 37]:
output = "American Express"
# Okay. So we know it's an AMEX, but what type?
if int(string[0:2]) == 37 and int(string[4:6]) in [66, 24, 26, 28]:
output = output + " (Gift card)"
elif int(string[0:2]) == 35:
output = "JCB Credit Card"
elif int(string[0:2]) == 36:
output = "Diners Club International"
elif int(string[0:2]) >= 40 and int(string[0:2]) <= 49:
output = "Visa"
elif int(string[0:2]) >= 50 and int(string[0:2]) <= 55:
output = "MasterCard"
elif int(string[0:2]) in [56, 67]:
output = "Maestro"
elif int(string[0:2]) in [57, 58, 66]:
output = "Possible bank card"
elif int(string[0:2]) in [60, 63]:
output = "Miscilaneous gift card or store credit card"
else:
output = "Unable to determine issuer"
return output
"""
Miscellaneous cards. Some of these will make perfect sense and others will not, so we may have to guess.
Some of this includes generic functions.
"""
def ISOCardType(value):
return MainDBQuery("SELECT * FROM ISOCardTypes WHERE ID=" + str(value))[1]
# This is meant for working with rewards cards from stores and so forth.
def OtherCard(string):
print "Card number: ", FCFriendlyNumber(filter(lambda x: x.isdigit(), string[0])[OtherIIN(0, string[0]):])
print "IIN: ", OtherIIN(2, string[0])
print "Issuer: ", OtherIIN(1, string[0]) # This will be worked on for a later version.
# This will search IINs for values and their types
def OtherIIN(status, iin):
# Let's clear out the special characters left over so we can search.
iin = filter(lambda x: x.isdigit(), iin)
# Same as before, we'll try for six and if that fails, we'll go for four.
try:
output = MainDBQuery("SELECT * FROM IINs WHERE IIN LIKE " + str(iin)[:6])[1]
valid = 6
except:
# And if six fails, try four and then if not, fail.
try:
output = MainDBQuery("SELECT * FROM IINs WHERE IIN LIKE " + str(iin)[:4])[1]
valid = 4
except:
output = "Unable to locate IIN"
valid = 0
if status == 2:
if valid == 0:
return "Not found"
else:
return str(iin)[:valid]
if status == 1: return output
if status == 0: return valid
"""
Below is really meant for the meat and potatoes of the application.
It should start with the Main() function.
"""
# Needs error handling but it simplifies my queries
def MainDBQuery(query):
con = lite.connect("iin.sqlite")
cur = con.cursor()
cur.execute(query)
return cur.fetchone()
def MainErrorOut(message):
print ""
print message
exit(1)
# Just because I want to maintain version numbers
AppVer = 0.4
print "Card Explorer", str(AppVer)
print "Created by Colin Keigher (http://afreak.ca)"
print " "
Main()
| armyofevilrobots/Card-Explorer | issuer.py | Python | lgpl-3.0 | 9,686 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
import sys, os
from pysollib.settings import TOOLKIT, USE_TILE
if USE_TILE:
from pysollib.tile import ttk
from common import base_init_root_window, BaseTkSettings
def init_root_window(root, app):
base_init_root_window(root, app)
if TOOLKIT == 'gtk':
pass
elif USE_TILE:
theme = app.opt.tile_theme
style = ttk.Style(root)
if theme not in ('winnative', 'xpnative'):
color = style.lookup('.', 'background')
if color:
root.tk_setPalette(color)
##root.option_add('*Menu.foreground', 'black')
root.option_add('*Menu.activeBackground', '#08246b')
root.option_add('*Menu.activeForeground', 'white')
if theme == 'winnative':
style.configure('Toolbutton', padding=2)
else:
#root.option_add(...)
pass
class TkSettings(BaseTkSettings):
canvas_padding = (1, 1)
horizontal_toolbar_padding = (1, 0)
toolbar_relief = 'groove'
toolbar_borderwidth = 2
if USE_TILE:
toolbar_button_padding = (2, 0)
| TrevorLowing/PyGames | pysollib/winsystems/win32.py | Python | gpl-2.0 | 2,104 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class res_partner_mail(osv.Model):
""" Update partner to add a field about notification preferences. Add a generic opt-out field that can be used
to restrict usage of automatic email templates. """
_name = "res.partner"
_inherit = ['res.partner', 'mail.thread']
_mail_flat_thread = False
_mail_mass_mailing = _('Customers')
_columns = {
'notify_email': fields.selection([
('none', 'Never'),
('always', 'All Messages'),
], 'Receive Inbox Notifications by Email', required=True,
oldname='notification_email_send',
help="Policy to receive emails for new messages pushed to your personal Inbox:\n"
"- Never: no emails are sent\n"
"- All Messages: for every notification you receive in your Inbox"),
'opt_out': fields.boolean('Opt-Out',
help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. "
"Filter 'Available for Mass Mailing' allows users to filter the partners when performing mass mailing."),
}
_defaults = {
'notify_email': lambda *args: 'always',
'opt_out': False,
}
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(res_partner_mail, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for partner in self.browse(cr, uid, ids, context=context):
self._message_add_suggested_recipient(cr, uid, recipients, partner, partner=partner, reason=_('Partner Profile'))
return recipients
def message_get_default_recipients(self, cr, uid, ids, context=None):
return dict((id, {'partner_ids': [id], 'email_to': False, 'email_cc': False}) for id in ids)
| addition-it-solutions/project-all | addons/mail/res_partner.py | Python | agpl-3.0 | 2,880 |
#!/usr/bin/python
#coding:utf-8
import os
import sys
import time
import cPickle as pickle
BASE_PATH = "/root/exp/ibr/"
def get_host_num(path):
files = os.listdir(path)
for name in files:
if name.find('cgr_l') >= 0:
pos = name.find('.') + 1
return int(name[pos:])
return -1
def log_error(host_num, error_str):
dir_path = BASE_PATH + "log"
if not os.path.isdir(dir_path): os.makedirs(dir_path)
name = 'log_error.' + 'ibr-' + str(host_num)
file_path = os.path.join(dir_path, name)
with open(file_path, 'w+') as f:
f.write(error_str + '\n')
def rm_logdir():
dir_path = BASE_PATH + "log"
if os.path.isdir(dir_path):
cmd = 'rm -rf ' + dir_path
os.system(cmd)
def init(host_num, hosts2ports):
name = 'ibr-' + str(host_num)
port_id = hosts2ports[name][0]
interface = 'ns' + port_id[:11]
cmd = '/sbin/iptables -F & '
cmd = cmd + "/sbin/iptables -I INPUT -i %s -j DROP &"
rm_logdir()
if os.system(cmd % (interface,)):
        error_str = 'Failed: ' + cmd
log_error(host_num, error_str)
sys.exit(-1)
def compare_cgr(cgr_old, cgr):
    '''1: add the iptables item, host index is the sequence
       0: no change
      -1: delete the iptables item
'''
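    # Example (illustration only): compare_cgr([1, 0, 1], [1, 1, 0]) returns
    # [0, 1, -1] -- keep the rule for host 1, add a rule for host 2 and
    # delete the rule for host 3.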
cgr_c = []
if len(cgr_old) != len(cgr): return cgr
for i,elem in enumerate(cgr):
if elem == 0:
if cgr_old[i] == 1:
cgr_c.append(-1)
else:
cgr_c.append(0)
elif elem == 1:
if cgr_old[i] == 1:
cgr_c.append(0)
else:
cgr_c.append(1)
return cgr_c
def topology_ctl(host_num, cgr_l, hosts2ports, test=False):
cgr_old = []
name = 'ibr-' + str(host_num)
port_id = hosts2ports[name][0]
interface = 'ns' + port_id[:11]
for cgr in cgr_l:
cgr_c = compare_cgr(cgr_old, cgr)
for index, state in enumerate(cgr_c):
name = 'ibr-' + str(index + 1)
src_ip = hosts2ports[name][1]
if state == -1:
command = '/sbin/iptables -D INPUT -i %s -p ip --src %s -j ACCEPT'
command = command % (interface, src_ip)
os.system(command)
elif state == 1:
command = '/sbin/iptables -I INPUT -i %s -p ip --src %s -j ACCEPT'
command = command % (interface, src_ip)
os.system(command)
cgr_old = cgr
yield True
def create_node_rec_command(dst_host, src_hosts):
'''dst_host is list index, start from 0,
dst_host is the host receiving dtn
src_hosts is list as [[src_index,0],.....]
dtnrecv --name src_dst_NUM --file /root/exp/ibr/rec/src_dst_NUM.txt
'''
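    # Example (illustration only): create_node_rec_command(1, [[0, 0]]) builds
    #   /usr/local/bin/dtnrecv --name ibr-1_ibr-2_1 --file /root/exp/ibr/rec/ibr-1_ibr-2_1.txt &
    # i.e. host ibr-2 listens for the first bundle sent from ibr-1.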
send_count = {}
dst_name = 'ibr-' + str(dst_host + 1)
command = ""
dir_path = BASE_PATH + "rec/"
if not os.path.isdir(dir_path): os.makedirs(dir_path)
for src in src_hosts:
src_index = src[0]
src_name = 'ibr-' + str(src_index + 1)
if not send_count.has_key(src_name):
send_count[src_name] = 1
src_dst_num = src_name + "_" + dst_name +\
"_" + str(send_count[src_name])
command = command + '/usr/local/bin/dtnrecv --name ' + src_dst_num +\
" --file " + dir_path + src_dst_num + ".txt & "
send_count[src_name] = send_count[src_name] + 1
return command
def create_file(file_path, content, size):
'''size unit is Bytes'''
content = content + '\n'
left_content = ''
cnt = size/len(content)
if size%len(content): left_content = '1'*(size%len(content))
dir_path = os.path.dirname(file_path)
if not os.path.isdir(dir_path): os.makedirs(dir_path)
with open(file_path, 'w') as file_obj:
for i in range(cnt):
file_obj.write(content)
if left_content: file_obj.write(left_content)
def create_node_send_command(src_host, dst_hosts):
'''src_host is list index, start from 0,
src_host is the host sending dtn
dst_hosts is list [[host_index, time_index], [],...]
'''
send_count = {}
src_name = 'ibr-' + str(src_host + 1)
command = ""
i = 0
time_count = 0
while i < len(dst_hosts):
dst = dst_hosts[i]
time_index = dst[1]
if time_count >= time_index:
dst_index = dst[0]
dst_name = 'ibr-' + str(dst_index + 1)
if not send_count.has_key(dst_name):
send_count[dst_name] = 1
src_dst_num = src_name + "_" + dst_name +\
"_" + str(send_count[dst_name])
file_path = BASE_PATH + 'send/' + src_dst_num + '.txt'
create_file(file_path, src_dst_num, 1024)
cmd = '/usr/local/bin/dtnsend dtn://' + dst_name + '/' + src_dst_num +\
' ' + file_path + ' &'
send_count[dst_name] = send_count[dst_name] + 1
i = i + 1
time_count = time_count + 1
yield cmd
time_count = time_count + 1
yield ""
def setup_dtnd(host_num, hosts2ports):
name = 'ibr-' + str(host_num)
port_id = hosts2ports[name][0]
interface = 'ns' + port_id[:11]
start_cmd = "/usr/local/sbin/dtnd -c /etc/ibrdtn/ibrdtnd.conf -i %s &"
print start_cmd % (interface, )
if os.system(start_cmd % (interface,)):
error_str = 'Failed: ' + start_cmd
log_error(host_num, error_str)
sys.exit(-1)
def ibr_rec_send(host_num, hosts2ports, receivers, senders):
rec_cmd = create_node_rec_command(host_num-1, receivers)
if os.system(rec_cmd):
error_str = 'Failed: ' + rec_cmd
log_error(host_num, error_str)
sys.exit(-1)
send_cmd_iter = create_node_send_command(host_num-1, senders)
while True:
try:
send_cmd = send_cmd_iter.next()
if os.system(send_cmd):
                error_str = 'Failed: ' + send_cmd
log_error(host_num, error_str)
sys.exit(-1)
yield True
except StopIteration:
break
yield False
def main():
try:
start_time = float(sys.argv[1])
warm_time = int(sys.argv[2])
time_unit = int(sys.argv[3])
except:
help_info = "Usage:%s <start_time> <warm_time> <time_unit>(s)\n" % sys.argv[0]
sys.exit(-1)
base_path = "/tmp/"
while True:
host_num = get_host_num(base_path)
if host_num < 0:
time.sleep(1)
else:
break
print "Obtain host_num:%d" % (host_num,)
if os.system('/usr/bin/killdtn &'):
log_error(host_num, 'Failed: /usr/bin/killdtn')
sys.exit(-1)
path = base_path + 'cgr_l.' + str(host_num)
with open(path, 'rb') as f:
cgr_l = pickle.load(f)
print "Loaded %s!" % path
path = base_path + 'senders.' + str(host_num)
with open(path, 'rb') as f:
senders = pickle.load(f)
print "Loaded %s!" % path
path = base_path + 'receivers.' + str(host_num)
with open(path, 'rb') as f:
receivers = pickle.load(f)
print "Loaded %s!" % path
path = base_path + 'hosts2ports'
with open(path, 'rb') as f:
hosts2ports = pickle.load(f)
print "Loaded %s!" % path
init(host_num, hosts2ports)
print "Finished to init"
print "Waiting for starting"
while time.time() < start_time:
time.sleep(0.1)
print "Start now!!!"
print "Setup dtnd,Warm-up now!!!"
setup_dtnd(host_num, hosts2ports)
start_time = start_time + warm_time
while time.time() < (start_time):
time.sleep(0.1)
topology_iter = topology_ctl(host_num, cgr_l, hosts2ports)
rec_send_iter = ibr_rec_send(host_num, hosts2ports, receivers, senders)
rec_send_flag = True
count = 0
while True:
count = count +1
next_time = start_time + time_unit*count
try:
topology_iter.next()
except:
break
rec_send_iter.next()
if rec_send_flag:
try:
rec_send_flag = rec_send_iter.next()
except:
pass
while time.time() < next_time:
time.sleep(0.1)
#print time.time(), count
if __name__ == '__main__':
sys.exit(main())
| melon-li/netem | netem-agent.py | Python | apache-2.0 | 8,478 |
from types import MethodType
from inspect import getmodule
from ..simplified.modelapi import _require_metaattr
from restful_api import restful_api
from searchform import _create_seachform
from editform import _create_editform
def _copy_supports_metaattrs_from_simplified(cls):
""" Copy all supports_[method] boolean variables from the simplified class. """
for method in cls._meta.simplified._all_cruds_methods:
attrname = 'supports_{0}'.format(method)
setattr(cls, attrname, getattr(cls._meta.simplified, attrname))
def _create_get_foreignkey_fieldcls_method(cls):
def get_foreignkey_fieldcls(cls, fkfieldname):
""" Get the class stored at the ``fkfieldname`` key in the
``cls.foreignkey_fields``.
:return: None if not found, and a restful class if found.
"""
if not hasattr(cls, 'foreignkey_fields'):
return None
if not fkfieldname in cls.foreignkey_fields:
return None
fkrestfulcls = cls.foreignkey_fields[fkfieldname]
if isinstance(fkrestfulcls, str): # Support giving the class name as string if in the same module. For recursive foreign keys, such as Node.parentnode.
module = getmodule(cls)
return getattr(module, fkrestfulcls)
else:
return fkrestfulcls
setattr(cls._meta, get_foreignkey_fieldcls.__name__, MethodType(get_foreignkey_fieldcls, cls._meta))
def restful_modelapi(cls):
"""
:class:`ModelRestfulView` is used in conjunction with the
:class:`restful_modelapi`-decorator to autogenerate a RESTful
interface for a simplified class (see :ref:`simplified`).
The ``cls`` must have an inner class named ``Meta`` with
the following attributes:
simplified
A :ref:`simplified` class. **Required**.
foreignkey_fields
A dictionary mapping foreign key fields to RESTful classes
that contains the data for the foreign key field.
The decorator automatically decorates ``cls`` with
:func:`restful_api`.
The decorator adds the following attributes to ``cls``:
_meta
Alias for the Meta class (above).
supports_*
Copied from ``_meta.simplified._meta``.
SearchForm
A Django form that can be used to validate the keyword arguments sent
to the ``search()`` method in :ref:`simplified`.
EditForm
A Django model form that can be used to validate and edit the model
specified in :ref:`simplified` specified in ``_meta.simplfied._meta.model``.
"""
cls = restful_api(cls)
_require_metaattr(cls, 'simplified')
_create_seachform(cls)
if not hasattr(cls._meta, 'fake_editablefields_formfields'):
cls._meta.fake_editablefields_formfields = {}
_create_editform(cls)
_copy_supports_metaattrs_from_simplified(cls)
_create_get_foreignkey_fieldcls_method(cls)
return cls
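# Minimal usage sketch (hypothetical simplified class and field names, not
# taken from this code base):
#
#     @restful_modelapi
#     class RestfulSimplifiedNode(ModelRestfulView):
#         class Meta:
#             simplified = SimplifiedNode
#             foreignkey_fields = {'parentnode': 'RestfulSimplifiedNode'}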
| vegarang/devilry-django | devilry/restful/restful_modelapi.py | Python | bsd-3-clause | 2,970 |
"""
FEniCS tutorial demo program:
Poisson equation with Dirichlet and Neumann conditions.
As dn2_p2D.py, but the linear system is explicitly formed and solved.
-Laplace(u) = f on the unit square.
u = 1 + 2y^2 on x=0.
u = 2 + 2y^2 on x=1.
-du/dn = g on y=0 and y=1.
u = 1 + x^2 + 2y^2, f = -6, g = -4y.
"""
from dolfin import *
import numpy
# Create mesh and define function space
mesh = UnitSquare(2, 1)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define Dirichlet conditions for x=0 boundary
u_L = Expression('1 + 2*x[1]*x[1]')
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0]) < tol
Gamma_0 = DirichletBC(V, u_L, LeftBoundary())
# Define Dirichlet conditions for x=1 boundary
u_R = Expression('2 + 2*x[1]*x[1]')
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0] - 1) < tol
Gamma_1 = DirichletBC(V, u_R, RightBoundary())
bcs = [Gamma_0, Gamma_1]
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
g = Expression('-4*x[1]')
a = inner(grad(u), grad(v))*dx
L = f*v*dx - g*v*ds
# Assemble and solve linear system
A = assemble(a)
b = assemble(L)
if mesh.num_cells() < 16:
print 'A = assemble(a); b = assemble(L)'
print 'A before incorporation of essential BC:\n', A.array()
print 'b before incorporation of essential BC:\n', b.array()
for bc in bcs:
bc.apply(A, b)
if mesh.num_cells() < 16:
print 'A after incorporation of essential BC:\n', A.array()
print 'b after incorporation of essential BC:\n', b.array()
# Alternative creation of the linear system
# (symmetric modification of boundary conditions)
A, b = assemble_system(a, L, bcs)
if mesh.num_cells() < 16:
print '\nA, b = assemble_system(a, L, bcs)'
print 'A after incorporation of essential BC:\n', A.array()
print 'b after incorporation of essential BC:\n', b.array()
# Compute solution
u = Function(V)
U = u.vector()
solve(A, U, b)
#plot(u)
print """
Solution of the Poisson problem -Laplace(u) = f,
with u = u0 on x=0,1 and -du/dn = g at y=0,1.
%s
""" % mesh
# Dump solution to the screen
u_nodal_values = u.vector()
u_array = u_nodal_values.array()
coor = mesh.coordinates()
for i in range(len(u_array)):
print 'u(%8g,%8g) = %g' % (coor[i][0], coor[i][1], u_array[i])
# Exact solution:
u_exact = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
# Verification
u_e = interpolate(u_exact, V)
u_e_array = u_e.vector().array()
print 'Max error:', numpy.abs(u_e_array - u_array).max()
# Compare numerical and exact solution
center = (0.5, 0.5)
print 'numerical u at the center point:', u(center)
print 'exact u at the center point:', u_exact(center)
#interactive()
| maciekswat/dolfin_1.3.0 | test/unit/book/python/chapter_1_files/stationary/poisson/dn3_p2D.py | Python | gpl-3.0 | 2,868 |
# coding: utf-8
# In[16]:
import pandas as pd
import numpy as np
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import os
import csv
# In[17]:
print (os.getcwd())
# In[18]:
fn = "stops.txt"
with open(fn, "r") as f:
reader = csv.reader(f)
header = next(reader)
data = {}
for column in header:
data[column] = []
for row in reader:
for column, value in zip(header, row):
data[column].append(value)
# In[21]:
class Dataset:
def __init__(self, data):
self.data = data
def convert(self, column, dtype):
self.data[column] = np.array(self.data[column], dtype=dtype)
def columns(self):
return self.data.keys()
def filter_eq(self, column, value):
good = (self.data[column] == value)
new_data = {}
for column in self.data:
new_data[column] = self.data[column][good]
return Dataset(new_data)
def filter_lt(self, column, value):
good = (self.data[column] < value)
new_data = {}
for column in self.data:
new_data[column] = self.data[column][good]
return Dataset(new_data)
def filter_gt(self, column, value):
good = (self.data[column] > value)
new_data = {}
for column in self.data:
new_data[column] = self.data[column][good]
return Dataset(new_data)
def filter_ne(self, column, value):
good = (self.data[column] != value)
new_data = {}
for column in self.data:
new_data[column] = self.data[column][good]
return Dataset(new_data)
def size(self):
for key in self.data:
return self.data[key].size
def split(self, column):
new_datasets = {}
for split_value in np.unique(self.data[column]):
new_datasets[split_value] = self.filter_eq(column, split_value)
return new_datasets
def stats(self):
statistics = {}
for key in self.data:
if self.data[key].dtype not in ("float", "int"):
continue
values = self.data[key]
statistics[key] = (values.min(), values.max(), values.std(), values.mean())
return statistics
def compare(self, other):
stats1 = self.stats()
stats2 = other.stats()
for column in self.columns():
if column not in stats1: continue
print("Column '{0:25s}'".format(column))
for s1, s2 in zip(stats1[column], stats2[column]):
print(" {0} vs {1}".format(s1, s2))
def plot(self, x_column, y_column):
plt.plot(self.data[x_column], self.data[y_column], '.')
# In[73]:
header
# In[76]:
stopsdata= Dataset(data)
value_types = {'stop_ids': 'str',
'stop_code': 'str',
'stop_name':'str',
'stop_desc':'str',
'stop_lat':'float',
'stop_lon':'float',
'zone_id':'float',
'stop_url':'str',
'location_type':'str',
'parent_station':'str'}
for v in stopsdata.columns():
stopsdata.convert(v, value_types.get(v, "str"))
# In[135]:
plt.subplot(221)
plt.rcParams["figure.figsize"] = (20, 20)
plt.grid()
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("Walkable areas in 1 minutes for each stop", fontsize=25)
plt.plot(data["stop_lon"],data["stop_lat"],c='#00ff80',marker='o',markersize=7,mec='none',ls='',alpha=0.05)
plt.subplot(222)
plt.rcParams["figure.figsize"] = (20, 20)
plt.grid()
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("Walkable areas in 2 minutes for each stop", fontsize=25)
plt.plot(data["stop_lon"],data["stop_lat"],c='#80ff00',marker='o',markersize=15,mec='none',ls='',alpha=0.05)
plt.subplot(223)
plt.rcParams["figure.figsize"] = (20, 20)
plt.grid()
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("Walkable areas in 5 minutes for each stop", fontsize=25)
plt.plot(data["stop_lon"],data["stop_lat"],c='#ffff00',marker='o',markersize=32,mec='none',ls='',alpha=0.05)
plt.subplot(224)
plt.rcParams["figure.figsize"] = (20, 20)
plt.grid()
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("Walkable areas in 10 minutes for each stop", fontsize=25)
plt.plot(data["stop_lon"],data["stop_lat"],c='#ff0000',marker='o',markersize=65,mec='none',ls='',alpha=0.05)
# # Explanation
# According to Google Maps, 0.02 degrees of longitude at 40.06N latitude equals about 1.1 miles. So the circle here represents a circle area with r=0.275 mile, which means an area which only takes about 5 minutes to walk.
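# Back-of-the-envelope check of that claim (my own arithmetic, not from the
# original notebook): one degree of longitude at 40.06N spans roughly
# 69.17*cos(40.06 deg) ~= 53 miles, so 0.02 degrees ~= 1.06 miles. At a walking
# speed of about 3.3 mph, covering a 0.275-mile radius takes roughly
# 0.275/3.3*60 ~= 5 minutes.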
# In[160]:
stats=stopsdata.stats()
plt.rcParams["figure.figsize"] = (20, 15)
stats=stopsdata.stats()
lon_min=stats["stop_lon"][0]
lon_max=stats["stop_lon"][1]
lat_min=stats["stop_lat"][0]
lat_max=stats["stop_lat"][1]
num_bins=16
lon=np.mgrid[lon_min:lon_max:(num_bins+1)*1j]
lat=np.mgrid[lat_min:lat_max:(num_bins+1)*1j]
tree_count=np.zeros((num_bins,num_bins))
for i in range(num_bins):
left_lat=lat[i]
right_lat=lat[i+1]
filter_lat_left=stopsdata.filter_gt("stop_lat",left_lat)
filter_lat_right=filter_lat_left.filter_lt("stop_lat",right_lat)
for j in range(num_bins):
left_lon=lon[j]
right_lon=lon[j+1]
filter_lon_left=filter_lat_right.filter_gt("stop_lon",left_lon)
filter_lon_right=filter_lon_left.filter_lt("stop_lon",right_lon)
tree_count[i,j] +=filter_lon_right.size()
#plt.xlim(lon_min,lon_max)
#plt.ylim(lat_min,lat_max)
plt.subplot(221)
plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max),origin="lower")
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("The distribution of stops", fontsize=25)
color_bar=plt.colorbar()
color_bar.set_label("Count")
plt.subplot(222)
plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max),origin="lower",cmap =plt.cm.Blues)
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("The distribution of stops", fontsize=25)
color_bar=plt.colorbar()
color_bar.set_label("Count")
plt.subplot(223)
plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max),origin="lower", cmap = plt.cm.afmhot)
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("The distribution of stops", fontsize=25)
color_bar=plt.colorbar()
color_bar.set_label("Count")
plt.subplot(224)
plt.imshow(tree_count, extent=(lon_min,lon_max,lat_min,lat_max),origin="lower", cmap = plt.cm.BuGn)
plt.xlabel("Longitude",fontsize=15)
plt.ylabel("Latitude",fontsize=15)
plt.title("The distribution of stops", fontsize=25)
color_bar=plt.colorbar()
color_bar.set_label("Count")
# # Question
# How to make the plotting area a perfect square.
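# One possible answer (untested sketch, not from the original notebook):
#     ax = plt.gca()
#     ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable='box')
# or, on matplotlib >= 3.3, simply ax.set_box_aspect(1).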
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| yingjun2/project-spring2017 | part1/bin/Final+Project+Question+1+Xiaoliang+-+v+1.0.py | Python | bsd-3-clause | 6,913 |
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from django.conf import settings
class CustomerBillConfig(AppConfig):
name = 'retail.customer_bill'
def __init__(self, app_name, app_module):
super(self.__class__, self).__init__(app_name, app_module)
def ready(self):
import drf_nest.signals
from retail.customer_bill.models import CustomerBill
from retail.customer_bill.serializers import CustomerBillSerializer
exchange_prefix = settings.MQ_FRAMEWORK['EXCHANGE_PREFIX'] + self.name
exchange_header_list = ('status',)
post_save.connect( drf_nest.signals.notify_extra_args( serializer=CustomerBillSerializer,
exchange_prefix=exchange_prefix + ".CustomerBill",
exchange_header_list=exchange_header_list)(drf_nest.signals.notify_save_instance),
sender=CustomerBill, weak=False)
| Semprini/cbe-retail | retail/customer_bill/apps.py | Python | apache-2.0 | 1,070 |
"""
Dictionary of playerinfo that cannot be easily obtained while retrieving match pages
"""
players = dict(
Cruzerthebruzer=['top', 'Dignitas'],
Crumbzz=['jungle', 'Dignitas'],
Scarra=['mid', 'Dignitas'],
Imaqtpie=['adc', 'Dignitas'],
KiWiKiD=['support', 'Dignitas'],
Balls=['top', 'Cloud 9'],
Meteos=['jungle', 'Cloud 9'],
Hai=['mid', 'Cloud 9'],
Sneaky=['adc', 'Cloud 9'],
LemonNation=['support', 'Cloud 9'],
Dyrus=['top', 'TSM'],
TheOddOne=['jungle', 'TSM'],
Bjergsen=['mid', 'TSM'],
WildTurtle=['adc', 'TSM'],
Xpecial=['support', 'TSM'],
Nien=['top', 'CLG'],
Link=['mid', 'CLG'],
HotshotGG=['mid', 'CLG'],
Doublelift=['adc', 'CLG'],
Aphromoo=['support', 'CLG'],
Innox=['top', 'Evil Geniuses'],
Snoopeh=['jungle', 'Evil Geniuses'],
Pobelter=['mid', 'Evil Geniuses'],
Yellowpete=['adc', 'Evil Geniuses'],
Krepo=['support', 'Evil Geniuses'],
Benny=['top', 'XDG'],
Zuna=['adc', 'XDG'],
mancloud=['mid', 'XDG'],
Xmithie=['jungle', 'XDG'],
BloodWater=['support', 'XDG'],
ZionSpartan=['top', 'Coast'],
NintendudeX=['jungle', 'Coast'],
Shiphtur=['mid', 'Coast'],
WizFujiiN=['adc', 'Coast'],
Daydreamin=['support', 'Coast'],
Quas=['top', 'Curse'],
IWillDominate=['jungle', 'Curse'],
Voyboy=['mid', 'Curse'],
Cop=['adc', 'Curse'],
Zekent=['support', 'Curse'],
Wickd=['top', 'Alliance'],
Shook=['jungle', 'Alliance'],
Froggen=['mid', 'Alliance'],
Tabzz=['adc', 'Alliance'],
Nyph=['support', 'Alliance'],
sOAZ=['top', 'Fnatic'],
Cyanide=['jungle', 'Fnatic'],
xPeke=['mid', 'Fnatic'],
Rekkles=['adc', 'Fnatic'],
YellOwStaR=['support', 'Fnatic'],
Kev1n=['top', 'Millenium'],
Araneae=['jungle', 'Millenium'],
Kerp=['mid', 'Millenium'],
Creaton=['adc', 'Millenium'],
Jree=['support', 'Millenium'],
Xaxus=['top', 'ROCCAT'],
Jankos=['jungle', 'ROCCAT'],
Overpow=['mid', 'ROCCAT'],
Celaver=['adc', 'ROCCAT'],
VandeR=['support', 'ROCCAT'],
Darien=['top', 'Gambit'],
Diamond=['jungle', 'Gambit'],
Genja=['adc', 'Gambit'],
EDward=['support', 'Gambit'],
fredy122=['top', 'SK Gaming'],
Svenskeren=['jungle', 'SK Gaming'],
Jesiz=['mid', 'SK Gaming'],
CandyPanda=['adc', 'SK Gaming'],
nRated=['support', 'SK Gaming'],
YoungBuck=['top', 'Copenhagen Wolves'],
Amazing=['jungle', 'Copenhagen Wolves'],
cowTard=['mid', 'Copenhagen Wolves'],
Forg1ven=['adc', 'Copenhagen Wolves'],
Unlimited=['support', 'Copenhagen Wolves'],
Mimer=['top', 'Supa Hot Crew XD'],
Impaler=['jungle', 'Supa Hot Crew XD'],
Moopz=['mid', 'Supa Hot Crew XD'],
Migxa=['support', 'Supa Hot Crew XD'],
Chauster=['mid', 'CLG'],
Saintvicious=['support', 'Curse'],
dexter=['jungle', 'CLG'],
Nickwu=['jungle', 'XDG'],
Zorozero=['top', 'Gambit'],
Nukeduck=['mid', 'Gambit'],
Hulberto=['jungle', 'Gambit'],
fury=['adc', 'Gambit'],
Sheep=['support', 'XDG'],
Reginald=['mid', 'TSM'],
SELFIE=['mid', 'Supa Hot Crew XD'],
ROBERTxLEE=['adc', 'Evil Geniuses'],
Ken=['support', 'Evil Geniuses'],
Thinkcard=['jungle', 'Evil Geniuses'],
goldenglue=['mid', 'Dignitas'],
KottenX=['jungle', 'Millenium']
)
players['Mr RalleZ'] = ['adc', 'Supa Hot Crew XD']
players['Alex Ich'] =['mid', 'Gambit']
players['Bunny FuFuu'] = ['support', 'Curse']
playerinfo = players | johnttan/lolesports_scraper | playerinfo.py | Python | mit | 3,471 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
from __future__ import print_function
import re
import six
from grit.node import base
from grit import clique
from grit import exception
from grit import lazy_re
from grit import tclib
from grit import util
# Matches exactly three dots ending a line or followed by whitespace.
_ELLIPSIS_PATTERN = lazy_re.compile(r'(?<!\.)\.\.\.(?=$|\s)')
_ELLIPSIS_SYMBOL = u'\u2026' # Ellipsis
# Finds whitespace at the start and end of a string which can be multiline.
_WHITESPACE = lazy_re.compile(r'(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
re.DOTALL | re.MULTILINE)
# <ph> placeholder elements should contain the special character formatters
# used to format <ph> element content.
# Android format.
_ANDROID_FORMAT = (r'%[1-9]+\$'
r'([-#+ 0,(]*)([0-9]+)?(\.[0-9]+)?'
r'([bBhHsScCdoxXeEfgGaAtT%n])')
# Chrome l10n format.
_CHROME_FORMAT = r'\$+\d'
# Windows EWT numeric and GRIT %s %d formats.
_OTHER_FORMAT = r'%[0-9sd]'
# Finds formatters that must be in a placeholder (<ph>) element.
_FORMATTERS = lazy_re.compile(
'(%s)|(%s)|(%s)' % (_ANDROID_FORMAT, _CHROME_FORMAT, _OTHER_FORMAT))
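# For illustration (examples mine, not from the original comments): this regex
# matches placeholder formatters such as '%1$s' (Android), '$1' (Chrome l10n)
# and '%s' or '%d' (Windows EWT / GRIT), any of which must appear inside a
# <ph> element.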
_BAD_PLACEHOLDER_MSG = ('ERROR: Placeholder formatter found outside of <ph> '
'tag in message "%s" in %s.')
_INVALID_PH_CHAR_MSG = ('ERROR: Invalid format characters found in message '
'"%s" <ph> tag in %s.')
# Finds HTML tag tokens.
_HTMLTOKEN = lazy_re.compile(r'<[/]?[a-z][a-z0-9]*[^>]*>', re.I)
# Finds HTML entities.
_HTMLENTITY = lazy_re.compile(r'&[^\s]*;')
class MessageNode(base.ContentNode):
'''A <message> element.'''
# For splitting a list of things that can be separated by commas or
# whitespace
_SPLIT_RE = lazy_re.compile(r'\s*,\s*|\s+')
def __init__(self):
super(MessageNode, self).__init__()
# Valid after EndParsing, this is the MessageClique that contains the
# source message and any translations of it that have been loaded.
self.clique = None
# We don't send leading and trailing whitespace into the translation
# console, but rather tack it onto the source message and any
# translations when formatting them into RC files or what have you.
self.ws_at_start = '' # Any whitespace characters at the start of the text
self.ws_at_end = '' # --"-- at the end of the text
# A list of "shortcut groups" this message is in. We check to make sure
# that shortcut keys (e.g. &J) within each shortcut group are unique.
self.shortcut_groups_ = []
# Formatter-specific data used to control the output of individual strings.
# formatter_data is a space separated list of C preprocessor-style
# definitions. Names without values are given the empty string value.
# Example: "foo=5 bar baz=100"
self.formatter_data = {}
# Whether or not to convert ... -> U+2026 within Translate().
self._replace_ellipsis = False
def _IsValidChild(self, child):
return isinstance(child, (PhNode))
def _IsValidAttribute(self, name, value):
if name not in ['name', 'offset', 'translateable', 'desc', 'meaning',
'internal_comment', 'shortcut_groups', 'custom_type',
'validation_expr', 'use_name_for_id', 'sub_variable',
'formatter_data']:
return False
if (name in ('translateable', 'sub_variable') and
value not in ['true', 'false']):
return False
return True
def SetReplaceEllipsis(self, value):
r'''Sets whether to replace ... with \u2026.
'''
self._replace_ellipsis = value
def MandatoryAttributes(self):
return ['name|offset']
def DefaultAttributes(self):
return {
'custom_type' : '',
'desc' : '',
'formatter_data' : '',
'internal_comment' : '',
'meaning' : '',
'shortcut_groups' : '',
'sub_variable' : 'false',
'translateable' : 'true',
'use_name_for_id' : 'false',
'validation_expr' : '',
}
def HandleAttribute(self, attrib, value):
base.ContentNode.HandleAttribute(self, attrib, value)
if attrib != 'formatter_data':
return
# Parse value, a space-separated list of defines, into a dict.
# Example: "foo=5 bar" -> {'foo':'5', 'bar':''}
for item in value.split():
name, _, val = item.partition('=')
self.formatter_data[name] = val
def GetTextualIds(self):
'''
Returns the concatenation of the parent's node first_id and
this node's offset if it has one, otherwise just call the
superclass' implementation
'''
if 'offset' not in self.attrs:
return super(MessageNode, self).GetTextualIds()
# we search for the first grouping node in the parents' list
# to take care of the case where the first parent is an <if> node
grouping_parent = self.parent
import grit.node.empty
while grouping_parent and not isinstance(grouping_parent,
grit.node.empty.GroupingNode):
grouping_parent = grouping_parent.parent
assert 'first_id' in grouping_parent.attrs
return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
def IsTranslateable(self):
return self.attrs['translateable'] == 'true'
def EndParsing(self):
super(MessageNode, self).EndParsing()
# Make the text (including placeholder references) and list of placeholders,
# verify placeholder formats, then strip and store leading and trailing
# whitespace and create the tclib.Message() and a clique to contain it.
text = ''
placeholders = []
for item in self.mixed_content:
if isinstance(item, six.string_types):
# Not a <ph> element: fail if any <ph> formatters are detected.
if _FORMATTERS.search(item):
print(_BAD_PLACEHOLDER_MSG % (item, self.source))
raise exception.PlaceholderNotInsidePhNode
text += item
else:
# Extract the <ph> element components.
presentation = item.attrs['name'].upper()
text += presentation
ex = ' ' # <ex> example element cdata if present.
if len(item.children):
ex = item.children[0].GetCdata()
original = item.GetCdata()
# Sanity check the <ph> element content.
cdata = original
# Replace all HTML tag tokens in cdata.
match = _HTMLTOKEN.search(cdata)
while match:
cdata = cdata.replace(match.group(0), '_')
match = _HTMLTOKEN.search(cdata)
# Replace all HTML entities in cdata.
match = _HTMLENTITY.search(cdata)
while match:
cdata = cdata.replace(match.group(0), '_')
match = _HTMLENTITY.search(cdata)
# Remove first matching formatter from cdata.
match = _FORMATTERS.search(cdata)
if match:
cdata = cdata.replace(match.group(0), '')
# Fail if <ph> special chars remain in cdata.
if re.search(r'[%\$]', cdata):
message_id = self.attrs['name'] + ' ' + original;
print(_INVALID_PH_CHAR_MSG % (message_id, self.source))
raise exception.InvalidCharactersInsidePhNode
# Otherwise, accept this <ph> placeholder.
placeholders.append(tclib.Placeholder(presentation, original, ex))
m = _WHITESPACE.match(text)
if m:
self.ws_at_start = m.group('start')
self.ws_at_end = m.group('end')
text = m.group('body')
self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']
description_or_id = self.attrs['desc']
if description_or_id == '' and 'name' in self.attrs:
description_or_id = 'ID: %s' % self.attrs['name']
assigned_id = None
if self.attrs['use_name_for_id'] == 'true':
assigned_id = self.attrs['name']
message = tclib.Message(text=text, placeholders=placeholders,
description=description_or_id,
meaning=self.attrs['meaning'],
assigned_id=assigned_id)
self.InstallMessage(message)
def InstallMessage(self, message):
'''Sets this node's clique from a tclib.Message instance.
Args:
message: A tclib.Message.
'''
self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
for group in self.shortcut_groups_:
self.clique.AddToShortcutGroup(group)
if self.attrs['custom_type'] != '':
self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
clique.CustomType))
elif self.attrs['validation_expr'] != '':
self.clique.SetCustomType(
clique.OneOffCustomType(self.attrs['validation_expr']))
def SubstituteMessages(self, substituter):
'''Applies substitution to this message.
Args:
substituter: a grit.util.Substituter object.
'''
message = substituter.SubstituteMessage(self.clique.GetMessage())
if message is not self.clique.GetMessage():
self.InstallMessage(message)
def GetCliques(self):
return [self.clique] if self.clique else []
def Translate(self, lang):
'''Returns a translated version of this message.
'''
assert self.clique
msg = self.clique.MessageForLanguage(lang,
self.PseudoIsAllowed(),
self.ShouldFallbackToEnglish()
).GetRealContent()
if self._replace_ellipsis:
msg = _ELLIPSIS_PATTERN.sub(_ELLIPSIS_SYMBOL, msg)
# Always remove all byte order marks (\uFEFF) https://crbug.com/1033305
msg = msg.replace(u'\uFEFF','')
return msg.replace('[GRITLANGCODE]', lang)
def NameOrOffset(self):
key = 'name' if 'name' in self.attrs else 'offset'
return self.attrs[key]
def ExpandVariables(self):
'''We always expand variables on Messages.'''
return True
def GetDataPackValue(self, lang, encoding):
'''Returns a str represenation for a data_pack entry.'''
message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
return util.Encode(message, encoding)
def IsResourceMapSource(self):
return True
@staticmethod
def Construct(parent, message, name, desc='', meaning='', translateable=True):
'''Constructs a new message node that is a child of 'parent', with the
name, desc, meaning and translateable attributes set using the same-named
parameters and the text of the message and any placeholders taken from
'message', which must be a tclib.Message() object.'''
# Convert type to appropriate string
translateable = 'true' if translateable else 'false'
node = MessageNode()
node.StartParsing('message', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('desc', desc)
node.HandleAttribute('meaning', meaning)
node.HandleAttribute('translateable', translateable)
items = message.GetContent()
for ix, item in enumerate(items):
if isinstance(item, six.string_types):
# Ensure whitespace at front and back of message is correctly handled.
if ix == 0:
item = "'''" + item
if ix == len(items) - 1:
item = item + "'''"
node.AppendContent(item)
else:
phnode = PhNode()
phnode.StartParsing('ph', node)
phnode.HandleAttribute('name', item.GetPresentation())
phnode.AppendContent(item.GetOriginal())
if len(item.GetExample()) and item.GetExample() != ' ':
exnode = ExNode()
exnode.StartParsing('ex', phnode)
exnode.AppendContent(item.GetExample())
exnode.EndParsing()
phnode.AddChild(exnode)
phnode.EndParsing()
node.AddChild(phnode)
node.EndParsing()
return node
class PhNode(base.ContentNode):
'''A <ph> element.'''
def _IsValidChild(self, child):
return isinstance(child, ExNode)
def MandatoryAttributes(self):
return ['name']
def EndParsing(self):
super(PhNode, self).EndParsing()
# We only allow a single example for each placeholder
if len(self.children) > 1:
raise exception.TooManyExamples()
def GetTextualIds(self):
# The 'name' attribute is not an ID.
return []
class ExNode(base.ContentNode):
'''An <ex> element.'''
pass
| endlessm/chromium-browser | tools/grit/grit/node/message.py | Python | bsd-3-clause | 12,578 |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
import os
import shutil
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
# create another dummy wallet for use in testing backups later
self.start_node(0, [])
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_dir("wallet.dat"), empty_wallet)
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly
# '' - to verify default wallet file is created correctly
wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
extra_args = ['-wallet={}'.format(n) for n in wallet_names]
self.start_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])
assert_equal(set(node.listwallets()), set(wallet_names))
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
# should not initialize if wallet path can't be created
exp_stderr = "boost::filesystem::create_directory:"
self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
# should not initialize if there are duplicate wallets
self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
exp_stderr = "BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], 'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.log.info("Do not allow -salvagewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0, ['-wallet=w4', '-wallet=w5'])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
exp_stderr = "Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], "regtest")
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(4.0)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 4.0)
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
# Fail to load duplicate wallets by different ways (directory and filepath)
assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
| droark/bitcoin | test/functional/wallet_multiwallet.py | Python | mit | 17,262 |
# Copyright (c) 2015 Telefonica I+D.
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.test import attr
from murano_tempest_tests.tests.api.application_catalog import base
from murano_tempest_tests import utils
class TestEnvironmentTemplatesSanity(base.BaseApplicationCatalogTest):
@attr(type='smoke')
def test_list_empty_env_templates(self):
templates_list = self.application_catalog_client.\
get_env_templates_list()
self.assertIsInstance(templates_list, list)
@attr(type='smoke')
def test_create_and_delete_env_template(self):
name = utils.generate_name('create_and_delete_env_template')
env_template = self.application_catalog_client.\
create_env_template(name)
self.assertFalse(env_template['is_public'])
self.assertEqual(name, env_template['name'])
env_templates_list = self.application_catalog_client.\
get_env_templates_list()
self.assertIn(env_template, env_templates_list)
self.application_catalog_client.\
delete_env_template(env_template['id'])
env_templates_list = self.application_catalog_client.\
get_env_templates_list()
self.assertNotIn(env_template, env_templates_list)
class TestEnvironmentTemplates(base.BaseApplicationCatalogTest):
@classmethod
def resource_setup(cls):
super(TestEnvironmentTemplates, cls).resource_setup()
name = utils.generate_name(cls.__name__)
cls.env_template = cls.application_catalog_client.\
create_public_env_template(name)
cls.alt_client = cls.get_client_with_isolated_creds('alt')
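        # Note: alt_client is built from a second, isolated set of credentials,
        # so it acts as a different tenant in the public/private visibility
        # checks exercised by the tests below.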
@classmethod
def resource_cleanup(cls):
cls.application_catalog_client.\
delete_env_template(cls.env_template['id'])
super(TestEnvironmentTemplates, cls).resource_cleanup()
@attr(type='smoke')
def test_get_env_template(self):
env_template = self.application_catalog_client.\
get_env_template(self.env_template['id'])
self.assertEqual(self.env_template['name'], env_template['name'])
@attr(type='smoke')
def test_create_env_template_with_a_service(self):
name = utils.generate_name('create_env_template_with_service')
post_body = self._get_demo_app()
env_template = self.application_catalog_client.\
create_env_template_with_services(name, post_body)
self.addCleanup(self.application_catalog_client.delete_env_template,
env_template['id'])
list_services = self.application_catalog_client.\
get_services_list_in_env_template(env_template['id'])
self.assertIsInstance(list_services, list)
self.assertIn(post_body, list_services)
@attr(type='smoke')
def test_add_and_remove_service_in_env_templates(self):
env_template_services = self.application_catalog_client.\
get_services_list_in_env_template(self.env_template['id'])
self.assertIsInstance(env_template_services, list)
post_body = self._get_demo_app()
service = self.application_catalog_client.\
create_service_in_env_template(self.env_template['id'], post_body)
self.assertEqual(post_body['name'], service['name'])
services = self.application_catalog_client.\
get_services_list_in_env_template(self.env_template['id'])
self.assertIn(service, services)
self.application_catalog_client.\
delete_service_from_env_template(self.env_template['id'],
service['?']['id'])
services = self.application_catalog_client.\
get_services_list_in_env_template(self.env_template['id'])
self.assertNotIn(service, services)
@attr(type='smoke')
def test_create_public_env_template(self):
name = utils.generate_name('create_public_env_template')
env_template = self.application_catalog_client.\
create_public_env_template(name)
self.addCleanup(self.application_catalog_client.delete_env_template,
env_template['id'])
self.assertEqual(name, env_template['name'])
env_temp = self.application_catalog_client.\
get_env_template(env_template['id'])
self.assertTrue(env_temp['is_public'])
@attr(type='smoke')
def test_clone_env_template(self):
name = utils.generate_name('clone_env_template')
cloned_template = self.alt_client.\
clone_env_template(self.env_template['id'], name)
self.addCleanup(self.alt_client.delete_env_template,
cloned_template['id'])
self.assertEqual(name, cloned_template['name'])
template = self.alt_client.get_env_template(cloned_template['id'])
self.assertEqual(name, template['name'])
@attr(type='smoke')
def test_get_public_private_both_env_templates(self):
name = utils.generate_name('get_public_private_both')
public_env_template = self.application_catalog_client.\
create_public_env_template(name)
self.addCleanup(self.application_catalog_client.delete_env_template,
public_env_template['id'])
self.assertTrue(public_env_template['is_public'])
private_name = utils.generate_name('get_public_private_both')
private_env_template = self.application_catalog_client.\
create_env_template(private_name)
self.addCleanup(self.application_catalog_client.delete_env_template,
private_env_template['id'])
self.assertFalse(private_env_template['is_public'])
private_name_alt = utils.generate_name('get_public_private_both')
private_alt_env_template = self.alt_client.\
create_env_template(private_name_alt)
self.addCleanup(self.alt_client.delete_env_template,
private_alt_env_template['id'])
public_env_templates = self.application_catalog_client.\
get_public_env_templates_list()
self.assertIn(public_env_template, public_env_templates)
self.assertNotIn(private_env_template, public_env_templates)
self.assertNotIn(private_alt_env_template, public_env_templates)
private_env_templates = self.application_catalog_client.\
get_private_env_templates_list()
self.assertNotIn(public_env_template, private_env_templates)
self.assertIn(private_env_template, private_env_templates)
self.assertNotIn(private_alt_env_template, private_env_templates)
env_templates = self.application_catalog_client.\
get_env_templates_list()
self.assertIn(public_env_template, env_templates)
self.assertIn(private_env_template, env_templates)
self.assertNotIn(private_alt_env_template, env_templates)
alt_pub_templates = self.alt_client.get_public_env_templates_list()
self.assertIn(public_env_template, alt_pub_templates)
self.assertNotIn(private_env_template, alt_pub_templates)
self.assertNotIn(private_alt_env_template, alt_pub_templates)
alt_priv_templates = self.alt_client.get_private_env_templates_list()
self.assertNotIn(public_env_template, alt_priv_templates)
self.assertNotIn(private_env_template, alt_priv_templates)
self.assertIn(private_alt_env_template, alt_priv_templates)
alt_env_templates = self.alt_client.get_env_templates_list()
self.assertIn(public_env_template, alt_env_templates)
self.assertNotIn(private_env_template, alt_env_templates)
self.assertIn(private_alt_env_template, alt_env_templates)
@attr(type='smoke')
def test_create_env_from_template(self):
name = utils.generate_name('create_env_from_template')
env_template = self.application_catalog_client.\
create_public_env_template(name)
self.addCleanup(self.application_catalog_client.delete_env_template,
env_template['id'])
post_body = self._get_demo_app()
service = self.application_catalog_client.\
create_service_in_env_template(env_template['id'], post_body)
self.assertEqual(post_body['name'], service['name'])
env_name = utils.generate_name('create_env_from_template')
environment = self.application_catalog_client.\
create_env_from_template(env_template['id'], env_name)
self.addCleanup(self.application_catalog_client.delete_environment,
environment['environment_id'])
self.assertIsNotNone(environment)
service_from_env = self.application_catalog_client.\
get_service(environment['environment_id'],
service['?']['id'],
environment['session_id'])
self.assertEqual(service, service_from_env)
| satish-avninetworks/murano | murano_tempest_tests/tests/api/application_catalog/test_env_templates.py | Python | apache-2.0 | 9,494 |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/rigid_body.py")
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
if __name__ == "__main__":
ut.main()
| espressomd/espresso | testsuite/scripts/samples/test_rigid_body.py | Python | gpl-3.0 | 983 |
# Experiment on acquiring potential differences (voltages) via Arduino.
# The script writes to the serial port Arduino is attached to a number that Arduino interprets as a delay in units of 10 ms, and starts the acquisition.
# It then waits for the data processed by Arduino to arrive on the serial port and saves them to a file.
import serial # library for handling the serial (USB) port
import time # library for timing
nacqs = 1 # number of acquisitions to record (each one has 600 points)
Directory='../dati_arduino/' # name of the directory where the data files are saved
FileName=(Directory+'data.txt') # names the data file <<<< CHANGE AS YOU LIKE
outputFile = open(FileName, "w+" ) # opens the data file for writing
for j in range (1,nacqs+1):
    print('Opening the serial port\n') # writes to the console (terminal)
    ard=serial.Serial('/dev/ttyACM0',9600) # opens the serial port (check how it is named, usually /dev/ttyACM0)
    time.sleep(2) # wait two seconds to avoid trouble
    ard.write(b'1') # interval (delay) between acquisitions in units of 10 ms <<<< this can be changed (default set to 10 ms)
    print('Start Acquisition ',j, ' of ',nacqs) # writes to the console (terminal)
    # loop reading data from the serial port (600 lines, adjust if needed)
    for i in range (0,600):
        data = ard.readline().decode() # reads the datum and decodes it
        if data:
            outputFile.write(data) # writes the data to the file
    ard.close() # closes the serial communication with Arduino
    print('Acquisition ',j,' completed\n') # writes to the console (terminal)
outputFile.close() # closes the data file
print('End')
| fedebell/Laboratorio3 | Laboratorio2/scriptACaso/ardu_multicount_v1.py | Python | gpl-3.0 | 1,880 |
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(file=os.path.dirname(__file__)+os.sep+'star_udc.aiml')
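        # Assumption: star_udc.aiml (not shown here) defines an ultimate default
        # category (UDC) with a star pattern whose template is "UDC Star Response",
        # which the assertions in the tests below rely on.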
class UDCAIMLTests(unittest.TestCase):
def setUp(self):
UDCAIMLTests.test_client = BasicTestClient()
def test_udc_multi_word_question(self):
response = UDCAIMLTests.test_client.bot.ask_question("test", "Ask Question")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC Star Response")
def test_udc_single_word_question(self):
response = UDCAIMLTests.test_client.bot.ask_question("test", "Question")
self.assertIsNotNone(response)
self.assertEqual(response, "UDC Star Response")
def test_udc_empty_string_question(self):
response = UDCAIMLTests.test_client.bot.ask_question("test", "")
self.assertIsNotNone(response)
self.assertEqual(response, "")
| dkamotsky/program-y | src/test/aiml_tests/udc_tests/star/test_star_udc_aiml.py | Python | mit | 1,236 |
"""Installation script."""
from os import path
from setuptools import find_packages, setup
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, 'README.rst')) as f:
LONG_DESCRIPTION = f.read().strip()
setup(
name='fuel',
version='0.0.1', # PEP 440 compliant
description='Data pipeline framework for machine learning',
long_description=LONG_DESCRIPTION,
url='https://github.com/mila-udem/fuel.git',
author='Universite de Montreal',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Utilities',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='dataset data iteration pipeline processing',
packages=find_packages(exclude=['tests']),
install_requires=['six', 'picklable_itertools', 'pyyaml', 'h5py',
'tables', 'progressbar2', 'pyzmq', 'scipy', 'pillow',
'requests'],
extras_require={
'test': ['nose', 'nose2', 'mock']
},
scripts=['bin/fuel-convert', 'bin/fuel-download', 'bin/fuel-info']
)
| laurent-dinh/fuel | setup.py | Python | mit | 1,421 |
"""Support for Z-Wave controls using the number platform."""
from __future__ import annotations
from zwave_js_server.client import Client as ZwaveClient
from homeassistant.components.number import DOMAIN as NUMBER_DOMAIN, NumberEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_CLIENT, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Z-Wave Number entity from Config Entry."""
client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]
@callback
def async_add_number(info: ZwaveDiscoveryInfo) -> None:
"""Add Z-Wave number entity."""
entities: list[ZWaveBaseEntity] = []
if info.platform_hint == "volume":
entities.append(ZwaveVolumeNumberEntity(config_entry, client, info))
else:
entities.append(ZwaveNumberEntity(config_entry, client, info))
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
f"{DOMAIN}_{config_entry.entry_id}_add_{NUMBER_DOMAIN}",
async_add_number,
)
)
class ZwaveNumberEntity(ZWaveBaseEntity, NumberEntity):
"""Representation of a Z-Wave number entity."""
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize a ZwaveNumberEntity entity."""
super().__init__(config_entry, client, info)
if self.info.primary_value.metadata.writeable:
self._target_value = self.info.primary_value
else:
self._target_value = self.get_zwave_value("targetValue")
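        # A read-only primary value only reports state; writes go to the
        # companion "targetValue" resolved above (see async_set_value below).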
# Entity class attributes
self._attr_name = self.generate_name(
include_value_name=True, alternate_value_name=info.platform_hint
)
@property
def min_value(self) -> float:
"""Return the minimum value."""
if self.info.primary_value.metadata.min is None:
return 0
return float(self.info.primary_value.metadata.min)
@property
def max_value(self) -> float:
"""Return the maximum value."""
if self.info.primary_value.metadata.max is None:
return 255
return float(self.info.primary_value.metadata.max)
@property
def value(self) -> float | None:
"""Return the entity value."""
if self.info.primary_value.value is None:
return None
return float(self.info.primary_value.value)
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of this entity, if any."""
if self.info.primary_value.metadata.unit is None:
return None
return str(self.info.primary_value.metadata.unit)
async def async_set_value(self, value: float) -> None:
"""Set new value."""
await self.info.node.async_set_value(self._target_value, value)
class ZwaveVolumeNumberEntity(ZWaveBaseEntity, NumberEntity):
"""Representation of a volume number entity."""
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize a ZwaveVolumeNumberEntity entity."""
super().__init__(config_entry, client, info)
self.correction_factor = int(
self.info.primary_value.metadata.max - self.info.primary_value.metadata.min
)
# Fallback in case we can't properly calculate correction factor
if self.correction_factor == 0:
self.correction_factor = 1
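        # Illustrative example (values are hypothetical): a metadata range of
        # 0..100 gives correction_factor == 100, so a raw device value of 50 is
        # exposed as 0.5, and setting 0.5 writes 50 back to the device.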
# Entity class attributes
self._attr_min_value = 0
self._attr_max_value = 1
self._attr_step = 0.01
self._attr_name = self.generate_name(include_value_name=True)
@property
def value(self) -> float | None:
"""Return the entity value."""
if self.info.primary_value.value is None:
return None
return float(self.info.primary_value.value) / self.correction_factor
async def async_set_value(self, value: float) -> None:
"""Set new value."""
await self.info.node.async_set_value(
self.info.primary_value, round(value * self.correction_factor)
)
| sander76/home-assistant | homeassistant/components/zwave_js/number.py | Python | apache-2.0 | 4,601 |
# -*- coding: utf-8 -*-
"""
Created on oct. 19, 2014, 21:04
Copyright François Durand 2014
[email protected]
This file is part of SVVAMP.
SVVAMP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SVVAMP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SVVAMP. If not, see <http://www.gnu.org/licenses/>.
"""
class MyLog:
"""Object that can send simple log messages."""
def __init__(self, log_identity='MYLOG', log_depth=0):
self.log_depth = log_depth
"""Level of depth"""
self.log_identity = log_identity
"""Name of the MyLog object. Will appear at the beginning of each log message."""
def mylog(self, message="I am alive", detail_level=1):
"""Print a log message.
Parameters
----------
message : str
The message to display.
detail_level : int
The level of detail of the message. The more it is, the less important is the message. Typically:
* 1: Beginning of a method (except a simple get).
* 2: Big steps of calculation.
* 3: Computations inside loop (very verbose log).
It is not recommended to use detail_level = 0 or lower.
Examples
--------
>>> from svvamp.utils.my_log import MyLog
>>> my_log_object = MyLog(log_identity="COMMENDATORE", log_depth=3)
>>> my_log_object.mylog("Don Giovanni!", 1)
COMMENDATORE: Don Giovanni!
"""
if detail_level <= self.log_depth:
print(self.log_identity + ": " + message)
def mylogv(self, message="Variable =", variable=None, detail_level=1):
"""Print a log message with the value of a variable.
Parameters
----------
message : str
variable : object
Variable to be displayed.
detail_level : int
Cf. :meth:`mylog`.
Examples
--------
>>> from svvamp.utils.my_log import MyLog
>>> my_log_object = MyLog(log_identity="HITCHHIKER", log_depth=3)
>>> my_log_object.mylogv("The answer is", 42)
HITCHHIKER: The answer is 42
"""
if detail_level <= self.log_depth:
print(self.log_identity + ": " + message, variable)
def mylogm(self, message="Variable =", variable=None, detail_level=1):
"""Print a log message with the value of a variable, typically a matrix.
This method is well suited for a matrix because it skips to next line before printing the variable.
Parameters
----------
message : str
variable : object
Variable to be displayed.
detail_level : int
Cf. :meth:`mylog`.
Examples
--------
>>> from svvamp.utils.my_log import MyLog
>>> import numpy as np
>>> my_log_object = MyLog(log_identity="MAGIC_SQUARE", log_depth=3)
>>> my_log_object.mylogm("A nice matrix:", np.array([[2, 7, 6], [9, 5, 1], [4, 3, 8]]))
MAGIC_SQUARE: A nice matrix:
[[2 7 6]
[9 5 1]
[4 3 8]]
"""
if detail_level <= self.log_depth:
print(self.log_identity + ": " + message)
print(variable)
| francois-durand/svvamp | svvamp/utils/my_log.py | Python | gpl-3.0 | 3,739 |
"""
Tests the crowdsourced hinter xmodule.
"""
from mock import Mock, MagicMock
import unittest
import copy
from xmodule.crowdsource_hinter import CrowdsourceHinterModule
from xmodule.vertical_module import VerticalModule, VerticalDescriptor
from xblock.field_data import DictFieldData
from xblock.fragment import Fragment
from xblock.core import XBlock
from . import get_test_system
import json
class CHModuleFactory(object):
"""
Helps us make a CrowdsourceHinterModule with the specified internal
state.
"""
sample_problem_xml = """
<?xml version="1.0"?>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown="A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value. The answer is correct if it is within a specified numerical tolerance of the expected answer. Enter the number of fingers on a human hand: = 5 [explanation] If you look at your hand, you can count that you have five fingers. [explanation] " rerandomize="never" showanswer="finished">
<p>A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.</p>
<p>The answer is correct if it is within a specified numerical tolerance of the expected answer.</p>
<p>Enter the number of fingers on a human hand:</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
"""
num = 0
@staticmethod
def next_num():
"""
Helps make unique names for our mock CrowdsourceHinterModule's
"""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create(hints=None,
previous_answers=None,
user_submissions=None,
user_voted=None,
moderate=None,
mod_queue=None):
"""
A factory method for making CHM's
"""
# Should have a single child, but it doesn't matter what that child is
field_data = {'data': CHModuleFactory.sample_problem_xml, 'children': [None]}
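        # The defaults below illustrate the storage shape these tests assume:
        # 'hints' maps an answer string to {pk: [hint_text, vote_count]}, while
        # 'mod_queue' entries carry only the hint text.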
if hints is not None:
field_data['hints'] = hints
else:
field_data['hints'] = {
'24.0': {'0': ['Best hint', 40],
'3': ['Another hint', 30],
'4': ['A third hint', 20],
'6': ['A less popular hint', 3]},
'25.0': {'1': ['Really popular hint', 100]}
}
if mod_queue is not None:
field_data['mod_queue'] = mod_queue
else:
field_data['mod_queue'] = {
'24.0': {'2': ['A non-approved hint']},
'26.0': {'5': ['Another non-approved hint']}
}
if previous_answers is not None:
field_data['previous_answers'] = previous_answers
else:
field_data['previous_answers'] = [
['24.0', [0, 3, 4]],
['29.0', []]
]
if user_submissions is not None:
field_data['user_submissions'] = user_submissions
else:
field_data['user_submissions'] = ['24.0', '29.0']
if user_voted is not None:
field_data['user_voted'] = user_voted
if moderate is not None:
field_data['moderate'] = moderate
descriptor = Mock(weight='1')
# Make the descriptor have a capa problem child.
capa_descriptor = MagicMock()
capa_descriptor.name = 'capa'
capa_descriptor.displayable_items.return_value = [capa_descriptor]
descriptor.get_children.return_value = [capa_descriptor]
# Make a fake capa module.
capa_module = MagicMock()
capa_module.lcp = MagicMock()
responder = MagicMock()
def validate_answer(answer):
""" A mock answer validator - simulates a numerical response"""
try:
float(answer)
return True
except ValueError:
return False
responder.validate_answer = validate_answer
def compare_answer(ans1, ans2):
""" A fake answer comparer """
return ans1 == ans2
responder.compare_answer = compare_answer
capa_module.lcp.responders = {'responder0': responder}
capa_module.displayable_items.return_value = [capa_module]
system = get_test_system()
# Make the system have a marginally-functional get_module
def fake_get_module(descriptor):
"""
A fake module-maker.
"""
return capa_module
system.get_module = fake_get_module
module = CrowdsourceHinterModule(descriptor, system, DictFieldData(field_data), Mock())
system.xmodule_instance = module
return module
class VerticalWithModulesFactory(object):
"""
Makes a vertical with several crowdsourced hinter modules inside.
Used to make sure that several crowdsourced hinter modules can co-exist
on one vertical.
"""
sample_problem_xml = """<?xml version="1.0"?>
<vertical display_name="Test vertical">
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Another test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
</vertical>
"""
num = 0
@staticmethod
def next_num():
"""Increments a global counter for naming."""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create():
"""Make a vertical."""
field_data = {'data': VerticalWithModulesFactory.sample_problem_xml}
system = get_test_system()
descriptor = VerticalDescriptor.from_xml(VerticalWithModulesFactory.sample_problem_xml, system)
module = VerticalModule(system, descriptor, field_data)
return module
class FakeChild(XBlock):
"""
A fake Xmodule.
"""
def __init__(self):
self.runtime = get_test_system()
self.student_view = Mock(return_value=Fragment(self.get_html()))
self.save = Mock()
self.id = 'i4x://this/is/a/fake/id'
def get_html(self):
"""
Return a fake html string.
"""
return u'This is supposed to be test html.'
class CrowdsourceHinterTest(unittest.TestCase):
"""
In the below tests, '24.0' represents a wrong answer, and '42.5' represents
a correct answer.
"""
def test_gethtml(self):
"""
A simple test of get_html - make sure it returns the html of the inner
problem.
"""
mock_module = CHModuleFactory.create()
def fake_get_display_items():
"""
A mock of get_display_items
"""
return [FakeChild()]
mock_module.get_display_items = fake_get_display_items
out_html = mock_module.render('student_view').content
self.assertTrue('This is supposed to be test html.' in out_html)
self.assertTrue('i4x://this/is/a/fake/id' in out_html)
def test_gethtml_nochild(self):
"""
get_html, except the module has no child :( Should return a polite
error message.
"""
mock_module = CHModuleFactory.create()
def fake_get_display_items():
"""
Returns no children.
"""
return []
mock_module.get_display_items = fake_get_display_items
out_html = mock_module.render('student_view').content
self.assertTrue('Error in loading crowdsourced hinter' in out_html)
@unittest.skip("Needs to be finished.")
def test_gethtml_multiple(self):
"""
Makes sure that multiple crowdsourced hinters play nice, when get_html
is called.
NOT WORKING RIGHT NOW
"""
mock_module = VerticalWithModulesFactory.create()
out_html = mock_module.render('student_view').content
self.assertTrue('Test numerical problem.' in out_html)
self.assertTrue('Another test numerical problem.' in out_html)
def test_numerical_answer_to_str(self):
"""
Tests the get request to string converter for numerical responses.
"""
mock_module = CHModuleFactory.create()
get = {'response1': '4'}
parsed = mock_module.numerical_answer_to_str(get)
self.assertTrue(parsed == '4')
def test_formula_answer_to_str(self):
"""
Tests the get request to string converter for formula responses.
"""
mock_module = CHModuleFactory.create()
get = {'response1': 'x*y^2'}
parsed = mock_module.formula_answer_to_str(get)
self.assertTrue(parsed == 'x*y^2')
def test_gethint_0hint(self):
"""
Someone asks for a hint, when there's no hint to give.
- Output should be blank.
- New entry should be added to previous_answers
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '26.0'}
out = mock_module.get_hint(json_in)
print mock_module.previous_answers
self.assertTrue(out is None)
self.assertTrue('26.0' in mock_module.user_submissions)
def test_gethint_unparsable(self):
"""
Someone submits an answer that is in the wrong format.
- The answer should not be added to previous_answers.
"""
mock_module = CHModuleFactory.create()
old_answers = copy.deepcopy(mock_module.previous_answers)
json_in = 'blah'
out = mock_module.get_hint(json_in)
self.assertTrue(out is None)
self.assertTrue(mock_module.previous_answers == old_answers)
def test_gethint_signature_error(self):
"""
Someone submits an answer that cannot be calculated as a float.
Nothing should change.
"""
mock_module = CHModuleFactory.create()
old_answers = copy.deepcopy(mock_module.previous_answers)
old_user_submissions = copy.deepcopy(mock_module.user_submissions)
json_in = {'problem1': 'fish'}
out = mock_module.get_hint(json_in)
self.assertTrue(out is None)
self.assertTrue(mock_module.previous_answers == old_answers)
self.assertTrue(mock_module.user_submissions == old_user_submissions)
def test_gethint_1hint(self):
"""
Someone asks for a hint, with exactly one hint in the database.
Output should contain that hint.
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '25.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('Really popular hint' in out['hints'])
# Also make sure that the input gets added to user_submissions,
# and that the hint is logged in previous_answers.
self.assertTrue('25.0' in mock_module.user_submissions)
self.assertTrue(['25.0', ['1']] in mock_module.previous_answers)
def test_gethint_manyhints(self):
"""
Someone asks for a hint, with many matching hints in the database.
- The top-rated hint should be returned.
- Two other random hints should be returned.
Currently, the best hint could be returned twice - need to fix this
in implementation.
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '24.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('Best hint' in out['hints'])
self.assertTrue(len(out['hints']) == 3)
def test_getfeedback_0wronganswers(self):
"""
Someone has gotten the problem correct on the first try.
Output should be empty.
"""
mock_module = CHModuleFactory.create(previous_answers=[], user_submissions=[])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
print out
self.assertTrue(out is None)
def test_getfeedback_1wronganswer_nohints(self):
"""
Someone has gotten the problem correct, with one previous wrong
answer. However, we don't actually have hints for this problem.
There should be a dialog to submit a new hint.
"""
mock_module = CHModuleFactory.create(previous_answers=[['26.0', [None, None, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(out['answer_to_hints'] == {'26.0': {}})
def test_getfeedback_1wronganswer_withhints(self):
"""
Same as above, except the user did see hints. There should be
a voting dialog, with the correct choices, plus a hint submission
dialog.
"""
mock_module = CHModuleFactory.create(previous_answers=[['24.0', [0, 3, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(len(out['answer_to_hints']['24.0']) == 2)
def test_getfeedback_missingkey(self):
"""
Someone gets a problem correct, but one of the hints that he saw
earlier (pk=100) has been deleted. Should just skip that hint.
"""
mock_module = CHModuleFactory.create(
previous_answers=[['24.0', [0, 100, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(len(out['answer_to_hints']['24.0']) == 1)
def test_vote_nopermission(self):
"""
A user tries to vote for a hint, but he has already voted!
Should not change any vote tallies.
"""
mock_module = CHModuleFactory.create(user_voted=True)
json_in = {'answer': '24.0', 'hint': 1, 'pk_list': json.dumps([['24.0', 1], ['24.0', 3]])}
old_hints = copy.deepcopy(mock_module.hints)
mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints == old_hints)
def test_vote_withpermission(self):
"""
A user votes for a hint.
Also tests vote result rendering.
"""
mock_module = CHModuleFactory.create(
previous_answers=[['24.0', [0, 3, None]]])
json_in = {'answer': '24.0', 'hint': 3, 'pk_list': json.dumps([['24.0', 0], ['24.0', 3]])}
dict_out = mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints['24.0']['0'][1] == 40)
self.assertTrue(mock_module.hints['24.0']['3'][1] == 31)
self.assertTrue(['Best hint', 40] in dict_out['hint_and_votes'])
self.assertTrue(['Another hint', 31] in dict_out['hint_and_votes'])
def test_vote_unparsable(self):
"""
A user somehow votes for an unparsable answer.
Should return a friendly error.
(This is an unusual exception path - I don't know how it occurs,
except if you manually make a post request. But, it seems to happen
occasionally.)
"""
mock_module = CHModuleFactory.create()
# None means that the answer couldn't be parsed.
mock_module.answer_signature = lambda text: None
json_in = {'answer': 'fish', 'hint': 3, 'pk_list': '[]'}
dict_out = mock_module.tally_vote(json_in)
print dict_out
self.assertTrue(dict_out == {'error': 'Failure in voting!'})
def test_vote_nohint(self):
"""
A user somehow votes for a hint that doesn't exist.
Should return a friendly error.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}
dict_out = mock_module.tally_vote(json_in)
self.assertTrue(dict_out == {'error': 'Failure in voting!'})
def test_vote_badpklist(self):
"""
Some of the pk's specified in pk_list are invalid.
Should just skip those.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '24.0', 'hint': '0', 'pk_list': json.dumps([['24.0', 0], ['24.0', 12]])}
hint_and_votes = mock_module.tally_vote(json_in)['hint_and_votes']
self.assertTrue(['Best hint', 41] in hint_and_votes)
self.assertTrue(len(hint_and_votes) == 1)
def test_submithint_nopermission(self):
"""
A user tries to submit a hint, but he has already voted.
"""
mock_module = CHModuleFactory.create(user_voted=True)
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
print mock_module.user_voted
mock_module.submit_hint(json_in)
print mock_module.hints
self.assertTrue('29.0' not in mock_module.hints)
def test_submithint_withpermission_new(self):
"""
A user submits a hint to an answer for which no hints
exist yet.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' in mock_module.hints)
def test_submithint_withpermission_existing(self):
"""
A user submits a hint to an answer that has other hints
already.
"""
mock_module = CHModuleFactory.create(previous_answers=[['25.0', [1, None, None]]])
json_in = {'answer': '25.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
# Make a hint request.
json_in = {'problem name': '25.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('This is a new hint.' in out['hints'])
def test_submithint_moderate(self):
"""
A user submits a hint, but moderation is on. The hint should
show up in the mod_queue, not the public-facing hints
dict.
"""
mock_module = CHModuleFactory.create(moderate='True')
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' not in mock_module.hints)
self.assertTrue('29.0' in mock_module.mod_queue)
def test_submithint_escape(self):
"""
Make sure that hints are being html-escaped.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '29.0', 'hint': '<script> alert("Trololo"); </script>'}
mock_module.submit_hint(json_in)
self.assertTrue(mock_module.hints['29.0']['0'][0] == u'<script> alert("Trololo"); </script>')
def test_submithint_unparsable(self):
mock_module = CHModuleFactory.create()
mock_module.answer_signature = lambda text: None
json_in = {'answer': 'fish', 'hint': 'A hint'}
dict_out = mock_module.submit_hint(json_in)
print dict_out
print mock_module.hints
self.assertTrue('error' in dict_out)
self.assertTrue(None not in mock_module.hints)
self.assertTrue('fish' not in mock_module.hints)
def test_template_gethint(self):
"""
Test the templates for get_hint.
"""
mock_module = CHModuleFactory.create()
def fake_get_hint(_):
"""
Creates a rendering dictionary, with which we can test
the templates.
"""
return {'best_hint': 'This is the best hint.',
'rand_hint_1': 'A random hint',
'rand_hint_2': 'Another random hint',
'answer': '42.5'}
mock_module.get_hint = fake_get_hint
json_in = {'problem_name': '42.5'}
out = json.loads(mock_module.handle_ajax('get_hint', json_in))['contents']
self.assertTrue('This is the best hint.' in out)
self.assertTrue('A random hint' in out)
self.assertTrue('Another random hint' in out)
def test_template_feedback(self):
"""
Test the templates for get_feedback.
NOT FINISHED
from lxml import etree
mock_module = CHModuleFactory.create()
def fake_get_feedback(get):
index_to_answer = {'0': '42.0', '1': '9000.01'}
index_to_hints = {'0': [('A hint for 42', 12),
('Another hint for 42', 14)],
'1': [('A hint for 9000.01', 32)]}
return {'index_to_hints': index_to_hints, 'index_to_answer': index_to_answer}
mock_module.get_feedback = fake_get_feedback
json_in = {'problem_name': '42.5'}
out = json.loads(mock_module.handle_ajax('get_feedback', json_in))['contents']
html_tree = etree.XML(out)
# To be continued...
"""
pass
| TsinghuaX/edx-platform | common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py | Python | agpl-3.0 | 22,068 |
#!/usr/bin/env python2
# IRPF90 is a Fortran90 preprocessor written in Python for programming using
# the Implicit Reference to Parameters (IRP) method.
# Copyright (C) 2009 Anthony SCEMAMA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Anthony Scemama
# LCPQ - IRSAMC - CNRS
# Universite Paul Sabatier
# 118, route de Narbonne
# 31062 Toulouse Cedex 4
# [email protected]
from command_line import command_line
import irpf90_t
def run():
template = """
program codelet_%(name)s
implicit none
integer :: i
double precision :: ticks_0, ticks_1, cpu_0, cpu_1
integer, parameter :: irp_imax = %(NMAX)d
%(precondition)s
call provide_%(name)s
double precision :: irp_rdtsc
call cpu_time(cpu_0)
ticks_0 = irp_rdtsc()
do i=1,irp_imax
call bld_%(name)s
enddo
ticks_1 = irp_rdtsc()
call cpu_time(cpu_1)
print *, '%(name)s'
print *, '-----------'
print *, 'Cycles:'
print *, (ticks_1-ticks_0)/dble(irp_imax)
print *, 'Seconds:'
print *, (cpu_1-cpu_0)/dble(irp_imax)
end
"""
name, NMAX, precondition, filename = command_line.codelet
if precondition is None:
precondition = ""
else:
precondition = "PROVIDE "+precondition
file = open(filename,'w')
file.write(template%locals())
file.close()
| scemama/irpf90 | src/codelet.py | Python | gpl-2.0 | 1,981 |
# -*- coding: utf-8 -*-
"""
@author: Peter Morgan <[email protected]>
"""
from Qt import QtGui, QtCore, Qt, pyqtSignal
from ogt import ags4
import app_globals as G
from img import Ico
import xwidgets
from ags4_models import CG, AGS4_COLORS, HeadingsModel, AbbrevItemsModel
class AGS4DataDictBrowser( QtGui.QWidget ):
def __init__( self, parent=None, mode=None ):
QtGui.QWidget.__init__( self, parent )
self.debug = False
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
##=============================================================
self.tabWidget = QtGui.QTabWidget()
self.mainLayout.addWidget(self.tabWidget)
##=============================================================
self.agsGroupsWidget = AGS4GroupsBrowser(self)
self.tabWidget.addTab(self.agsGroupsWidget, Ico.icon(Ico.AgsGroups), "Groups")
self.unitsTypesWidget = AGS4UnitsTypesWidget(self)
self.tabWidget.addTab(self.unitsTypesWidget, Ico.icon(Ico.AgsField), "Units && Types")
##=============================================================
#self.agsAbbrevsWidget = AgsAbbrevsWidget.AgsAbbrevsWidget(self)
#self.tabWidget.addTab(self.agsAbbrevsWidget,dIco.icon(dIco.AgsAbbrevs), "Abbreviations / Pick lists")
##=============================================================
#self.agsUnitsWidget = AgsUnitsWidget.AgsUnitsWidget(self)
#self.tabWidget.addTab(self.agsUnitsWidget, dIco.icon(dIco.AgsUnits), "Units")
def init_load(self):
# load data dict
G.ags.init_load()
#self.tabWidget.setCurrentIndex(1)
self.agsGroupsWidget.set_focus()
class AGS4GroupsBrowser( QtGui.QWidget ):
"""The left panel with the classes, filter and groups table underneath"""
sigGroupSelected = pyqtSignal(object)
def __init__( self, parent=None):
QtGui.QWidget.__init__( self, parent )
self.debug = False
self.setObjectName("AGS4GroupsBrowser")
self.proxy = QtGui.QSortFilterProxyModel()
self.proxy.setSourceModel(G.ags.modelGroups)
self.proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
##===============================================
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
self.splitter = QtGui.QSplitter(self)
self.splitter.setObjectName(self.objectName() + "groups_splitter")
self.mainLayout.addWidget(self.splitter)
##############################################################################
leftWidget = QtGui.QWidget()
leftLayout = xwidgets.vlayout()
leftWidget.setLayout(leftLayout)
self.splitter.addWidget(leftWidget)
self.tabFilter = QtGui.QTabWidget()
leftLayout.addWidget(self.tabFilter)
##================================
## Filter
grpFilter = xwidgets.GroupGridBox()
mmm = 5
grpFilter.setContentsMargins(mmm, mmm, mmm, mmm)
# grpFilter.grid.setSpacing(5)
# grpFilter.setFixedWidth(150)
self.tabFilter.addTab(grpFilter, "Filter")
# filter combo
self.buttGroupFilter = QtGui.QButtonGroup()
self.buttGroupFilter.setExclusive(True)
#self.comboSearchFor = QtGui.QComboBox()
#grpFilter.addWidget(self.comboSearchFor)
for ridx, s in enumerate(["Code", "Description", "Code + Description"]):
rad = QtGui.QRadioButton()
rad.setText(s)
grpFilter.grid.addWidget(rad, ridx, 0, 1, 2)
self.buttGroupFilter.addButton(rad, 3 if ridx == 2 else ridx)
self.buttGroupFilter.button(0).setChecked(True)
self.buttGroupFilter.buttonClicked.connect(self.on_filter_col)
#self.comboSearchFor.addItem("Code", CG.code)
#self.comboSearchFor.addItem("Description", CG.description)
#self.comboSearchFor.addItem("Code + Description", CG.search)
#self.comboSearchFor.setMaximumWidth(150)
# clear button
self.buttClear = xwidgets.ClearButton(self, callback=self.on_clear_filter)
grpFilter.grid.addWidget(self.buttClear, 3, 0)
## filter text
self.txtFilter = QtGui.QLineEdit()
self.txtFilter.setMaximumWidth(100)
grpFilter.grid.addWidget(self.txtFilter, 3, 1)
self.txtFilter.textChanged.connect(self.on_txt_changed)
grpFilter.grid.addWidget(QtGui.QLabel(), 4, 2)
#grpFilter.layout.addStretch(3)
grpFilter.grid.setColumnStretch(0, 0)
grpFilter.grid.setColumnStretch(1, 10)
##================================
## Classification Tree
topLayout = QtGui.QVBoxLayout()
leftLayout.addLayout(topLayout, 0)
self.treeClass = QtGui.QTreeView()
self.tabFilter.addTab(self.treeClass, "By classification")
self.treeClass.setModel(G.ags.modelClasses)
self.treeClass.setRootIsDecorated(False)
self.treeClass.setExpandsOnDoubleClick(False)
self.treeClass.setFixedHeight(220)
self.treeClass.selectionModel().selectionChanged.connect(self.on_class_tree_selected)
##== Groups Tree
self.treeGroups = QtGui.QTreeView()
leftLayout.addWidget(self.treeGroups, 10)
self.treeGroups.setModel(self.proxy)
self.treeGroups.setUniformRowHeights(True)
self.treeGroups.setRootIsDecorated(False)
self.treeGroups.setAlternatingRowColors(True)
self.treeGroups.header().setStretchLastSection(True)
self.treeGroups.setColumnHidden(CG.search, True)
self.treeGroups.setColumnWidth(CG.code, 120)
self.treeGroups.setColumnWidth(CG.description, 250)
self.treeGroups.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.treeGroups.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.treeGroups.setSortingEnabled(True)
self.treeGroups.sortByColumn(CG.code)
self.treeGroups.selectionModel().selectionChanged.connect(self.on_groups_tree_selected)
self.agsGroupViewWidget = AGS4GroupViewWidget(self)
self.splitter.addWidget(self.agsGroupViewWidget)
self.splitter.setStretchFactor(0, 2)
self.splitter.setStretchFactor(1, 5)
G.settings.restore_splitter(self.splitter)
self.splitter.splitterMoved.connect(self.on_splitter_moved)
#self.statusBar = QtGui.QStatusBar()
#self.mainLayout.addWidget(self.statusBar, 0)
##############################################################################
rightWidget = QtGui.QWidget()
rightLayout = xwidgets.vlayout()
rightWidget.setLayout(rightLayout)
self.splitter.addWidget(rightWidget)
#self.agsHeadingDetailWidget = AGS4HeadingDetailWidget()
#rightLayout.addWidget(self.agsHeadingDetailWidget)
#self.init_setup()
G.ags.sigLoaded.connect(self.on_loaded)
self.txtFilter.setText("DETL")
def on_splitter_moved(self, i, pos):
G.settings.save_splitter(self.splitter)
def set_focus(self):
self.txtFilter.setFocus()
def init(self):
print "init", selfs
def on_proxy_changed(self, tl, br):
print "changes", tl, bsr
#=========================================
def on_groups_tree_selected(self, sel=None, desel=None):
if not self.treeGroups.selectionModel().hasSelection():
self.agsGroupViewWidget.set_group( None )
self.sigGroupSelected.emit( None )
return
tIdx = self.proxy.mapToSource( sel.indexes()[0] )
grp_dic = self.proxy.sourceModel().rec_from_midx( tIdx )
self.agsGroupViewWidget.set_group(grp_dic)
self.sigGroupSelected.emit( grp_dic )
def on_filter_col(self, idx):
self.update_filter()
self.txtFilter.setFocus()
def on_txt_changed(self, x):
self.update_filter()
def update_filter(self):
self.treeClass.blockSignals(True)
self.treeClass.clearSelection()
self.treeClass.blockSignals(False)
cidx = self.buttGroupFilter.checkedId()
self.proxy.setFilterKeyColumn(cidx)
txt = str(self.txtFilter.text()).strip()
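        # If the user typed a heading-style code (e.g. SAMP_ID), keep only the
        # group part before the underscore so the group filter still matches.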
if "_" in txt:
grp_code, _ = txt.split("_")
else:
grp_code = txt
self.proxy.setFilterFixedString(grp_code)
if self.proxy.rowCount() == 1:
# TODO
# #self.tree.selectionModel().select(self.proxy.index(0,0))
pass
def on_clear_filter(self):
self.txtFilter.setText("")
self.txtFilter.setFocus()
def on_class_tree_selected(self, selected, deselected):
if not self.treeClass.selectionModel().hasSelection():
self.txtFilter.setFocus()
#self.on_group_tree_selected()
return
self.proxy.setFilterKeyColumn(CG.cls)
item = self.treeClass.model().itemFromIndex(selected.indexes()[0])
if item.text() == "All":
self.proxy.setFilterFixedString("")
else:
self.proxy.setFilterFixedString(item.text())
self.txtFilter.setFocus()
def init_load(self):
pass
def on_loaded(self):
## expand first row
self.treeClass.setExpanded( self.treeClass.model().item(0,0).index(), True)
self.treeClass.sortByColumn(0, Qt.AscendingOrder)
## set sort orders
self.treeGroups.sortByColumn(CG.code, Qt.AscendingOrder)
self.treeGroups.resizeColumnToContents(CG.code)
class AGS4GroupViewWidget( QtGui.QWidget ):
"""The GroupView contains the vertically the Group Label at top, headings and notes"""
sigHeadingSelected = pyqtSignal(object)
def __init__( self, parent=None, mode=None ):
QtGui.QWidget.__init__( self, parent )
self.group_code = None
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
self.toolbar = xwidgets.hlayout()
self.mainLayout.addLayout(self.toolbar, 0)
self.icoLabel = xwidgets.IconLabel(self, ico=Ico.AgsGroup)
self.icoLabel.setStyleSheet("background-color: white; color: #444444;")
self.toolbar.addWidget(self.icoLabel)
self.lblGroupCode = QtGui.QLabel(" ")
self.lblGroupCode.setStyleSheet("background-color: white; color: %s; font-weight: bold; font-family: monospace; padding: 3px;" % AGS4_COLORS.group)
self.toolbar.addWidget(self.lblGroupCode, 1)
self.lblGroupCode.setFixedWidth(50)
self.lblDescription = QtGui.QLabel(" ")
self.lblDescription.setStyleSheet("background-color: white; color: #444444;")
self.toolbar.addWidget(self.lblDescription, 3)
self.mainLayout.addSpacing(10)
## Headings Table
self.agsHeadingsTable = AGS4HeadingsTable(self)
self.mainLayout.addWidget(self.agsHeadingsTable, 10)
##== Bottom Splitter
self.splitBott = QtGui.QSplitter()
self.splitBott.setObjectName("ags_group_view_notes_picklist")
self.mainLayout.addWidget(self.splitBott)
## Notes
self.agsGroupNotesWidget = AGS4GroupNotesWidget(self)
self.agsGroupNotesWidget.setFixedHeight(200)
self.splitBott.addWidget(self.agsGroupNotesWidget)
## Abbrs Picklist
self.agsAbbrevsWidget = AGS4AbbrevsWidget()
self.splitBott.addWidget(self.agsAbbrevsWidget)
## setup splitter
self.splitBott.setStretchFactor(0, 1)
self.splitBott.setStretchFactor(1, 1)
G.settings.restore_splitter(self.splitBott)
self.splitBott.splitterMoved.connect(self.on_splitter_bott_moved)
self.agsHeadingsTable.sigHeadingSelected.connect(self.on_heading_selection_changed)
self.agsGroupNotesWidget.sigWordClicked.connect(self.on_word_clicked)
def on_word_clicked(self, code):
        code = str(code)  # it's a QString, not a str, as sent!
rec = ags4.AGS4.words.get(code)
if rec:
if rec['type'] == "heading":
                # it's a heading, so select it if it's within this group, e.g. SAMP_ID is almost everywhere
found = self.agsHeadingsTable.select_heading(code)
if not found:
                    # it's not in this group, so open the other group
parts = code.split("_")
d = AGS4GroupViewDialog(group_code=parts[0], head_code=code)
d.exec_()
if rec['type'] == "group":
if code != self.group_code:
# Dialog only if its not this group
d = AGS4GroupViewDialog(group_code=self.group_code)
d.exec_()
def on_splitter_bott_moved(self):
G.settings.save_splitter(self.splitBott)
def on_heading_selection_changed(self, head_code):
self.sigHeadingSelected.emit(head_code)
self.agsAbbrevsWidget.set_heading(head_code)
def select_heading(self, head_code):
self.agsHeadingsTable.select_heading(head_code)
def clear(self):
self.lblGroupCode.setText("")
self.lblDescription.setText("")
self.agsGroupNotesTable.clear()
self.agsAbbrevsWidget.clear()
def set_group(self, grp):
## load subwidgets, even if grp==None
self.agsHeadingsTable.set_group(grp)
self.agsGroupNotesWidget.set_group(grp)
if grp == None:
self.group_code = None
self.lblGroupCode.setText("")
self.lblDescription.setText("")
return
self.group_code = grp['group_code']
self.lblGroupCode.setText(grp['group_code'])
self.lblDescription.setText(grp['group_description'])
class AGS4GroupViewDialog(QtGui.QDialog):
def __init__(self, parent=None, group_code=None, head_code=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowTitle(group_code)
self.setWindowIcon(Ico.icon(Ico.Ags4))
self.setMinimumWidth(1100)
self.setMinimumHeight(500)
self.mainLayout = QtGui.QHBoxLayout()
self.mainLayout.setSpacing(0)
margarine = 0
self.mainLayout.setContentsMargins(margarine, margarine, margarine, margarine)
self.setLayout(self.mainLayout)
self.groupViewWidget = AGS4GroupViewWidget(self)
self.mainLayout.addWidget(self.groupViewWidget)
grp = ags4.AGS4.group(group_code)
self.groupViewWidget.set_group(grp)
if head_code:
self.groupViewWidget.select_heading(head_code)
class AGS4HeadingsTable( QtGui.QWidget ):
sigHeadingSelected = pyqtSignal(object)
def __init__( self, parent ):
QtGui.QWidget.__init__( self, parent )
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
##===============================================================
self.tree = QtGui.QTreeView()
self.mainLayout.addWidget(self.tree)
self.tree.setUniformRowHeights(True)
self.tree.setRootIsDecorated(False)
self.tree.setAlternatingRowColors(True)
self.model = HeadingsModel()
self.tree.setModel(self.model)
CH = HeadingsModel.CH
self.tree.setColumnWidth(CH.strip, 3)
self.tree.setColumnWidth(CH.head_code, 100)
self.tree.setColumnWidth(CH.description, 250)
self.tree.setColumnWidth(CH.unit, 50)
self.tree.setColumnWidth(CH.status, 40)
self.tree.setColumnWidth(CH.data_type, 50)
self.tree.setColumnWidth(CH.sort_order, 20)
self.tree.header().setStretchLastSection(True)
self.tree.setSortingEnabled(False)
self.tree.setContextMenuPolicy( Qt.CustomContextMenu )
self.tree.customContextMenuRequested.connect(self.on_tree_context_menu )
self.tree.selectionModel().selectionChanged.connect(self.on_tree_selected)
self.popMenu = QtGui.QMenu()
self.actOpenGroup = self.popMenu.addAction(Ico.icon(Ico.AgsGroup), "CODEEEEE", self.on_act_open_group)
def on_tree_context_menu(self, qPoint):
idx = self.tree.indexAt(qPoint)
rec = self.model.rec_from_midx(idx)
gc = rec['head_code'].split("_")[0]
self.actOpenGroup.setDisabled(gc == self.model.grpDD['group_code'])
self.actOpenGroup.setText("Open: %s" % gc)
self.popMenu.exec_(self.tree.mapToGlobal(qPoint))
def on_act_open_group(self):
selidx = self.tree.selectionModel().selectedIndexes()
rec = self.model.rec_from_midx(selidx[0])
hc = rec.get("head_code")
gc = hc.split("_")[0]
d = AGS4GroupViewDialog(self, group_code=gc, head_code=hc)
d.exec_()
def set_group(self, grp):
self.model.set_group(grp)
def on_tree_selected(self, sel, desel):
if not self.tree.selectionModel().hasSelection():
self.sigHeadingSelected.emit( None )
return
rec = self.model.rec_from_midx( sel.indexes()[0] )
self.sigHeadingSelected.emit(rec)
def deadon_tree_context_menu(self, point):
if not self.tree.selectionModel().hasSelection():
return
def deadon_butt_pop(self, butt):
code = str(butt.property("code").toString())
p = self.mapFromGlobal(QtGui.QCursor.pos())
#print p
p = QtGui.QCursor.pos()
d = ags.AgsAbbrevPopDialog.AgsAbbrevPopDialog(self, abrev_code=code)
d.move(p.x() - 50, 100)
d.exec_()
def select_heading(self, head_code):
midx = self.model.get_heading_index(head_code)
if midx != None:
self.tree.selectionModel().setCurrentIndex(midx,
QtGui.QItemSelectionModel.SelectCurrent|QtGui.QItemSelectionModel.Rows)
class CN:
node = 0
note_id = 1
so = 2
class AGS4GroupNotesWidget( QtGui.QWidget ):
sigWordClicked = pyqtSignal(str)
def __init__( self, parent, mode=None ):
QtGui.QWidget.__init__( self, parent )
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
##==============================
scrollArea = QtGui.QScrollArea()
scrollArea.setWidgetResizable(True)
self.mainLayout.addWidget(scrollArea, 100)
self.scrollWidget = QtGui.QWidget()
scrollArea.setWidget(self.scrollWidget)
self.scrollLayout = QtGui.QVBoxLayout()
self.scrollLayout.setContentsMargins(0, 0, 0, 0)
self.scrollLayout.setSpacing(0)
self.scrollWidget.setLayout(self.scrollLayout)
def clear(self):
"""Removes all entries"""
## pain.. all this shite just to nuke a list
self.setUpdatesEnabled(False)
while self.scrollLayout.count() > 0:
vari = self.scrollLayout.itemAt(0)
w = vari.widget()
if w:
self.scrollLayout.removeWidget( w )
w.setParent(None)
w = None
else:
self.scrollLayout.removeItem( vari )
self.setUpdatesEnabled(True)
self.update()
def set_group(self, grp):
self.clear()
        if grp is None:
return
notes = grp.get("notes")
        if notes is None:
return
self.setUpdatesEnabled(False)
lookup = ags4.AGS4.words
for note in notes:
w = widget = QtGui.QLabel()
words = note.split(" ")
res = []
for word in words:
#print word
if word in lookup:
res.append("<a href='#%s-%s'>%s</a>" % (lookup[word]['type'], word, word))
else:
res.append(word)
widget.setText(" ".join(res))
widget.setTextFormat(QtCore.Qt.RichText)
widget.setWordWrap(True)
widget.setMargin(0)
sty = "background-color: #EEF1F8; padding: 2px; margin:0; border-bottom:1px solid #dddddd;"
widget.setStyleSheet(sty)
widget.setAlignment(QtCore.Qt.AlignTop)
self.scrollLayout.addWidget(w, 0)
#self.connect(widget, QtCore.SIGNAL("linkHovered(const QString)"), self.on_link_hover)
widget.linkActivated.connect(self.on_link_activated)
#if len(notes) < 4:
self.scrollLayout.addStretch(10)
self.setUpdatesEnabled(True)
def on_link_activated(self, lnkq):
lnk = str(lnkq)
parts = lnk[1:].split("-", 1 )
print "act", lnk, parts, type(parts[1])
self.sigWordClicked.emit( parts[1] )
class AGS4AbbrevsWidget( QtGui.QWidget ):
"""Shows pickist and accrevs etc"""
def __init__( self, parent=None):
QtGui.QWidget.__init__( self, parent )
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
self.toolbar = xwidgets.hlayout()
self.mainLayout.addLayout(self.toolbar, 0)
self.icoLabel = xwidgets.IconLabel(self, ico=Ico.AgsGroup)
self.icoLabel.setStyleSheet("background-color: white; color: #444444;")
self.toolbar.addWidget(self.icoLabel, 0)
self.lblAbbrCode = QtGui.QLabel(" ")
self.lblAbbrCode.setStyleSheet("background-color: white; color: %s; font-weight: bold; font-family: monospace; padding: 3px;" % AGS4_COLORS.group)
self.toolbar.addWidget(self.lblAbbrCode, 20)
##=== Tree
self.tree = QtGui.QTreeView()
self.mainLayout.addWidget(self.tree)
self.tree.setUniformRowHeights(True)
self.tree.setRootIsDecorated(False)
self.tree.setAlternatingRowColors(True)
self.tree.setSortingEnabled(False)
self.model = AbbrevItemsModel()
self.tree.setModel(self.model)
CA = AbbrevItemsModel.CA
self.tree.setColumnWidth(CA.code, 100)
self.tree.setColumnWidth(CA.description, 50)
self.tree.header().setStretchLastSection(True)
# TODO fix sort to ags
self.tree.setSortingEnabled(True)
self.set_heading(None)
def set_heading(self, heading):
self.model.set_heading(heading)
class PickListComboDelegate(QtGui.QItemDelegate):
"""A combobox for a table that whos the abrreviations picklist"""
def __init__(self, parent, heading):
QtGui.QItemDelegate.__init__(self, parent)
self.ogtHeading = heading
def createEditor(self, parent, option, index):
editor = QtGui.QComboBox(parent)
editor.addItem("--unknown--", "")
# populate combobox from abbreviations
for typ in ags4.AGS4.picklist(self.ogtHeading.head_code):
editor.addItem( "%s: %s " % (typ['code'], typ['description']), typ['code'])
return editor
def setEditorData(self, editor, index):
editor.blockSignals(True)
curr = index.model().data(index).toString()
idx = editor.findData(curr)
if idx != -1:
editor.setCurrentIndex(idx)
editor.blockSignals(False)
def setModelData(self, editor, model, index):
txt = editor.itemData(editor.currentIndex()).toString()
model.setData(index, txt)
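# Typical wiring (illustrative, not part of this module): a view using this
# delegate would install it per column, e.g.
#     view.setItemDelegateForColumn(col, PickListComboDelegate(view, heading))
# so cells in that column are edited through the AGS4 picklist combo.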
class NumberEditDelegate(QtGui.QItemDelegate):
"""Number editor to n decimal places"""
def __init__(self, parent, heading):
QtGui.QItemDelegate.__init__(self, parent)
self.ogtHeading = heading
##self.data_type = heading['type']
#self.data_type = heading['type']
self.dp = None
if self.ogtHeading.type.endswith("DP"):
self.dp = int(self.ogtHeading.type[:-2])
def createEditor(self, parent, option, index):
editor = QtGui.QLineEdit(parent)
if self.ogtHeading.type.endswith("DP"):
validator = QtGui.QDoubleValidator()
validator.setDecimals(self.dp)
editor.setValidator(validator)
return editor
def setEditorData(self, editor, index):
editor.blockSignals(True)
curr = index.model().data(index) #.toString()
editor.setText(curr)
editor.blockSignals(False)
def setModelData(self, editor, model, index):
no = float(editor.text())
f = "%01."
f += "%sf" % self.dp
#print f
txt = f % (no,)
model.setData(index, txt)
class IDComboDelegate(QtGui.QItemDelegate):
"""A combobox for the ID"""
def __init__(self, parent, heading, options):
QtGui.QItemDelegate.__init__(self, parent)
self.ogtHeading = heading
self.options = options
def createEditor(self, parent, option, index):
editor = QtGui.QComboBox(parent)
editor.addItem("--unknown--", "")
# populate combobox from abbreviations
for v in self.options:
editor.addItem( "%s" % v, "%s" % v)
return editor
def setEditorData(self, editor, index):
editor.blockSignals(True)
curr = index.model().data(index).toString()
idx = editor.findData(curr)
if idx != -1:
editor.setCurrentIndex(idx)
editor.blockSignals(False)
def setModelData(self, editor, model, index):
txt = editor.itemData(editor.currentIndex()).toString()
model.setData(index, txt)
class AGS4UnitsTypesWidget( QtGui.QWidget ):
"""The Units and Types tab"""
def __init__( self, parent=None):
QtGui.QWidget.__init__( self, parent )
self.debug = False
self.setObjectName("AGS4UnitTypesWidget")
self.proxyUnits = QtGui.QSortFilterProxyModel()
self.proxyUnits.setSourceModel(G.ags.modelUnits)
self.proxyUnits.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.proxyTypes = QtGui.QSortFilterProxyModel()
self.proxyTypes.setSourceModel(G.ags.modelTypes)
self.proxyTypes.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
##===============================================
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0,0,0,0)
self.setLayout(self.mainLayout)
self.splitter = QtGui.QSplitter()
self.mainLayout.addWidget(self.splitter)
self.treeUnits = self.make_tree(self.proxyUnits, "Unit", "Description")
self.splitter.addWidget(self.treeUnits)
self.treeTypes = self.make_tree(self.proxyTypes, "Type", "Description")
self.splitter.addWidget(self.treeTypes)
    def make_tree(self, model, tit1, tit2):
        tree = QtGui.QTreeView()
        tree.setRootIsDecorated(False)
        tree.setSortingEnabled(True)
        tree.setModel(model)
        # Column titles come from the model's headerData(); tit1 and tit2
        # are accepted for API symmetry but QTreeView has no headerItem(),
        # so they are not applied here.
        return tree
| open-geotechnical/ogt-ags-py | ogtgui/ags4_widgets.py | Python | gpl-2.0 | 27,425 |
from __future__ import print_function
from builtins import str
from pomdpy.discrete_pomdp import DiscreteState
class TigerState(DiscreteState):
"""
Enumerated state for the Tiger POMDP
Consists of a boolean "door_open" containing info on whether the state is terminal
or not. Terminal states are reached after a door is opened. "door_open" will be false
until an "open door" action is taken. This aspect of the state is *clearly* fully observable
The list "door_prizes" contains 0's for doors that have tigers behind them, and 1's
for doors that have prizes behind them. This part of the state is obscured. Listening
actions are necessary to increase confidence in choosing the right door. A single TigerState represents a
"guess" of the true belief state - which is the probability distribution over all states
For a 2-door system, either door_prizes[0] = 0 and door_prizes[1] = 1, or
door_prizes[0] = 1 and door_prizes[1] = 0
door_open = False, door_prizes[0] = 0, and door_prizes[1] = 1
door_open = False, door_prizes[0] = 1, and door_prizes[1] = 0
--------------------------------------------------------------
Placeholder for showing that the Markov Chain has reached an absorbing state ->
door_open = True, door_prizes[0] = X, and door_prizes[1] = X
"""
def __init__(self, door_open, door_prizes):
self.door_open = door_open # lists
self.door_prizes = door_prizes
def distance_to(self, other_state):
return self.equals(other_state)
def copy(self):
return TigerState(self.door_open, self.door_prizes)
def equals(self, other_state):
if self.door_open == other_state.door_open and \
self.door_prizes == other_state.door_prizes:
return 1
else:
return 0
def hash(self):
pass
def as_list(self):
"""
Concatenate both lists
:return:
"""
return self.door_open + self.door_prizes
def to_string(self):
if self.door_open:
state = 'Door is open'
else:
state = 'Door is closed'
return state + ' (' + str(self.door_prizes[0]) + ', ' + str(self.door_prizes[1]) + ')'
def print_state(self):
print(self.to_string())
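# Illustrative only: with two doors, a non-terminal state where door 0 hides
# the tiger and door 1 hides the prize could be built and shown as
#     s = TigerState(False, [0, 1])
#     s.print_state()   # -> Door is closed (0, 1)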
| pemami4911/POMDPy | examples/tiger/tiger_state.py | Python | mit | 2,337 |
import os
config = {
"buildbot_json_path": "buildprops.json",
"hostutils_manifest_path": "testing/config/tooltool-manifests/linux64/hostutils.manifest",
"tooltool_manifest_path": "testing/config/tooltool-manifests/androidx86/releng.manifest",
"tooltool_cache": "/home/worker/tooltool_cache",
"download_tooltool": True,
"tooltool_servers": ['http://relengapi/tooltool/'],
"avds_dir": "/home/worker/workspace/build/.android",
"emulator_manifest": """
[
{
"size": 193383673,
"digest": "6609e8b95db59c6a3ad60fc3dcfc358b2c8ec8b4dda4c2780eb439e1c5dcc5d550f2e47ce56ba14309363070078d09b5287e372f6e95686110ff8a2ef1838221",
"algorithm": "sha512",
"filename": "android-sdk18_0.r18moz1.orig.tar.gz",
"unpack": "True"
}
] """,
"emulator_process_name": "emulator64-x86",
"emulator_extra_args": "-show-kernel -debug init,console,gles,memcheck,adbserver,adbclient,adb,avd_config,socket -qemu -m 1024",
"device_manager": "adb",
"exes": {
'adb': '%(abs_work_dir)s/android-sdk18/platform-tools/adb',
},
"env": {
"DISPLAY": ":0.0",
"PATH": "%(PATH)s:%(abs_work_dir)s/android-sdk18/tools:%(abs_work_dir)s/android-sdk18/platform-tools",
"MINIDUMP_SAVEPATH": "%(abs_work_dir)s/../minidumps"
},
"default_actions": [
'clobber',
'read-buildbot-config',
'setup-avds',
'start-emulator',
'download-and-extract',
'create-virtualenv',
'verify-emulator',
'install',
'run-tests',
],
"emulator": {
"name": "test-1",
"device_id": "emulator-5554",
"http_port": "8854", # starting http port to use for the mochitest server
"ssl_port": "4454", # starting ssl port to use for the server
"emulator_port": 5554,
},
"suite_definitions": {
"xpcshell": {
"run_filename": "remotexpcshelltests.py",
"testsdir": "xpcshell",
"install": False,
"options": [
"--dm_trans=adb",
"--xre-path=%(xre_path)s",
"--testing-modules-dir=%(modules_dir)s",
"--apk=%(installer_path)s",
"--no-logfiles",
"--symbols-path=%(symbols_path)s",
"--manifest=tests/xpcshell.ini",
"--log-raw=%(raw_log_file)s",
"--log-errorsummary=%(error_summary_file)s",
"--test-plugin-path=none",
],
},
"mochitest-chrome": {
"run_filename": "runtestsremote.py",
"testsdir": "mochitest",
"options": [
"--dm_trans=adb",
"--app=%(app)s",
"--remote-webserver=%(remote_webserver)s",
"--xre-path=%(xre_path)s",
"--utility-path=%(utility_path)s",
"--http-port=%(http_port)s",
"--ssl-port=%(ssl_port)s",
"--certificate-path=%(certs_path)s",
"--symbols-path=%(symbols_path)s",
"--quiet",
"--log-raw=%(raw_log_file)s",
"--log-errorsummary=%(error_summary_file)s",
"--extra-profile-file=fonts",
"--extra-profile-file=hyphenation",
"--screenshot-on-fail",
"--flavor=chrome",
],
},
}, # end suite_definitions
"download_minidump_stackwalk": True,
"default_blob_upload_servers": [
"https://blobupload.elasticbeanstalk.com",
],
"blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
}
| Yukarumya/Yukarum-Redfoxes | testing/mozharness/configs/android/androidx86-tc.py | Python | mpl-2.0 | 3,677 |
# -*- coding: utf-8 -*-
''' Checks ground_motion_record function
by integrating the accelerations calculated as follows:
x= 9*t**3+10*t**2
xdot= 27*t**2+20*t
xdotdot= 54*t+20 '''
import xc_base
import geom
import xc
__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
#Load modulation.
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
ts= lPatterns.newTimeSeries("constant_ts","ts")
gm= lPatterns.newLoadPattern("uniform_excitation","gm")
mr= gm.motionRecord
hist= mr.history
accel= lPatterns.newTimeSeries("path_time_ts","accel")
accel.path= xc.Vector([20,74,128,182,236,290,344,398])
accel.time= xc.Vector([0,1,2,3,4,5,6,7])
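# The sampled accelerations above are xdotdot= 54*t+20 evaluated at t= 0..7 s
# (20, 74, ..., 398), so the loaded record matches the motion described in the
# header comment.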
hist.accel= accel
hist.delta= 0.01
motionHistoryDuration= mr.getDuration()
motionHistoryPathSize= mr.history.getNumberOfDataPoints()
motionHistoryAccel= mr.getAccel(0.5)
motionHistoryPeakAccel= mr.getPeakAccel()
motionHistoryDelta= mr.history.delta
motionHistoryVel= mr.getVel(0.5)
motionHistoryPeakVel= mr.getPeakVel()
motionHistoryDisp= mr.getDisp(0.5)
motionHistoryPeakDisp= mr.getPeakDisp()
ratio1= (motionHistoryDuration-7)/7
ratio2= (motionHistoryAccel-47)/47
ratio3= (motionHistoryPeakAccel-398)/398
ratio4= (motionHistoryDelta-0.01)/0.01
ratio5= (motionHistoryVel-16.75)/16.75
ratio6= (motionHistoryPathSize-8)/8
ratio7= (motionHistoryPeakVel-1458)/1458
ratio8= (motionHistoryDisp-3.63)/3.63
'''
print "duration= ",motionHistoryDuration
print "ratio1= ",ratio1
print "accel= ",motionHistoryAccel
print "ratio2= ",ratio2
print "peak accel= ",motionHistoryPeakAccel
print "ratio3= ",ratio3
print "delta= ",motionHistoryDelta
print "ratio4= ",ratio4
print "vel= ",motionHistoryVel
print "ratio5= ",ratio5
print "path size= ",motionHistoryPathSize
print "ratio6= ",ratio6
print "peak vel= ",motionHistoryPeakVel
print "ratio7= ",ratio7
print "disp= ",motionHistoryDisp
print "ratio8= ",ratio8
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1)<1e-15) & (abs(ratio2)<1e-15) & (abs(ratio3)<1e-15) & (abs(ratio4)<1e-15) & (abs(ratio5)<motionHistoryDelta) & (abs(ratio6)<1e-15) & (abs(ratio7)<motionHistoryDelta) & (abs(ratio8)<2*motionHistoryDelta) :
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/loads/test_ground_motion_04.py | Python | gpl-3.0 | 2,458 |
from _external import *
from glut import *
ssba = LibWithHeaderChecker( ['V3D','ldl','colamd'],
'Geometry/v3d_metricbundle.h',
'c++',
name='ssba',
defines=['V3DLIB_ENABLE_SUITESPARSE'],
dependencies=[glut] )
| tuttleofx/sconsProject | autoconf/ssba.py | Python | mit | 355 |
import wx
import wx.lib.agw.pycollapsiblepane as PCP
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "PyCollapsiblePane Demo")
panel = wx.Panel(self)
title = wx.StaticText(panel, label="PyCollapsiblePane")
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
title2 = wx.StaticText(panel, label="PyCollapsiblePane")
title.SetForegroundColour("black")
self.cp = cp = PCP.PyCollapsiblePane(panel, label="Some Data",
style=wx.CP_DEFAULT_STYLE)
self.MakePaneContent(cp.GetPane())
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(title, 0, wx.ALL, 25)
sizer.Add(cp, 0, wx.RIGHT | wx.LEFT | wx.EXPAND, 25)
sizer.Add(title2, 0, wx.ALL, 25)
panel.SetSizer(sizer)
sizer.Layout()
def MakePaneContent(self, pane):
''' Just makes a few controls to put on `PyCollapsiblePane`. '''
nameLbl = wx.StaticText(pane, -1, "Name:")
        name = wx.TextCtrl(pane, -1, "")
        addrLbl = wx.StaticText(pane, -1, "Address:")
        addr1 = wx.TextCtrl(pane, -1, "")
        addr2 = wx.TextCtrl(pane, -1, "")
        cstLbl = wx.StaticText(pane, -1, "City, State, Zip:")
        city = wx.TextCtrl(pane, -1, "", size=(150,-1))
        state = wx.TextCtrl(pane, -1, "", size=(50,-1))
        zip = wx.TextCtrl(pane, -1, "", size=(70,-1))
addrSizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=5)
addrSizer.AddGrowableCol(1)
addrSizer.Add(nameLbl, 0,
wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
addrSizer.Add(name, 0, wx.EXPAND)
addrSizer.Add(addrLbl, 0,
wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
addrSizer.Add(addr1, 0, wx.EXPAND)
addrSizer.Add((5,5))
addrSizer.Add(addr2, 0, wx.EXPAND)
addrSizer.Add(cstLbl, 0,
wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
cstSizer = wx.BoxSizer(wx.HORIZONTAL)
cstSizer.Add(city, 1)
cstSizer.Add(state, 0, wx.LEFT | wx.RIGHT, 5)
cstSizer.Add(zip)
addrSizer.Add(cstSizer, 0, wx.EXPAND)
border = wx.BoxSizer()
border.Add(addrSizer, 1, wx.EXPAND | wx.ALL, 5)
pane.SetSizer(border)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
| ODM2/ODM2StreamingDataLoader | src/wizard/controller/old/a.py | Python | bsd-3-clause | 2,472 |
from pyramid.events import NewRequest
from pyramid.httpexceptions import HTTPBadRequest
from pyramid_restler.view import RESTfulView
def add_restful_routes(self, name, factory, view=RESTfulView,
route_kw=None, view_kw=None):
"""Add a set of RESTful routes for an entity.
URL patterns for an entity are mapped to a set of views encapsulated in
a view class. The view class interacts with the model through a context
adapter that knows the particulars of that model.
To use this directive in your application, first call
`config.include('pyramid_restler')` somewhere in your application's
`main` function, then call `config.add_restful_routes(...)`.
``name`` is used as the base name for all route names and patterns. In
route names, it will be used as-is. In route patterns, underscores will
be converted to dashes.
``factory`` is the model adapter that the view interacts with. It can be
any class that implements the :class:`pyramid_restler.interfaces.IContext`
interface.
``view`` must be a view class that implements the
:class:`pyramid_restler.interfaces.IView` interface.
Additional route and view keyword args can be passed directly through to
all `add_route` and `add_view` calls. Pass ``route_kw`` and/or ``view_kw``
as dictionaries to do so.
"""
route_kw = {} if route_kw is None else route_kw
view_kw = {} if view_kw is None else view_kw
view_kw.setdefault('http_cache', 0)
subs = dict(
name=name,
slug=name.replace('_', '-'),
id='{id}',
renderer='{renderer}')
def add_route(name, pattern, attr, method):
name = name.format(**subs)
pattern = pattern.format(**subs)
self.add_route(
name, pattern, factory=factory,
request_method=method, **route_kw)
self.add_view(
view=view, attr=attr, route_name=name,
request_method=method, **view_kw)
# Get collection
add_route(
'get_{name}_collection_rendered', '/{slug}.{renderer}',
'get_collection', 'GET')
add_route(
'get_{name}_collection', '/{slug}', 'get_collection', 'GET')
# Get member
add_route(
'get_{name}_rendered', '/{slug}/{id}.{renderer}', 'get_member', 'GET')
add_route('get_{name}', '/{slug}/{id}', 'get_member', 'GET')
# Create member
add_route('create_{name}', '/{slug}', 'create_member', 'POST')
# Update member
add_route('update_{name}', '/{slug}/{id}', 'update_member', 'PUT')
# Delete member
add_route('delete_{name}', '/{slug}/{id}', 'delete_member', 'DELETE')
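# Illustrative usage (the entity and factory names are hypothetical, not part
# of this module): after config.include('pyramid_restler'), a call such as
#
#     config.add_restful_routes('thing', ThingContextFactory)
#
# registers GET /thing and GET /thing/{id} (plus rendered variants such as
# /thing.json), POST /thing, PUT /thing/{id} and DELETE /thing/{id}, all
# dispatched to RESTfulView.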
def enable_POST_tunneling(self, allowed_methods=('PUT', 'DELETE')):
"""Allow other request methods to be tunneled via POST.
This allows PUT and DELETE requests to be tunneled via POST requests.
The method can be specified using a parameter or a header...
The name of the parameter is '$method'; it can be a query or POST
parameter. The query parameter will be preferred if both the query and
POST parameters are present in the request.
The name of the header is 'X-HTTP-Method-Override'. If the parameter
described above is passed, this will be ignored.
The request method will be overwritten before it reaches application
code, such that the application will never be aware of the original
request method. Likewise, the parameter and header will be removed from
the request, and the application will never see them.
"""
param_name = '$method'
header_name = 'X-HTTP-Method-Override'
allowed_methods = set(allowed_methods)
disallowed_message = (
'Only these methods may be tunneled over POST: {0}.'
.format(sorted(list(allowed_methods))))
def new_request_subscriber(event):
request = event.request
if request.method == 'POST':
if param_name in request.GET:
method = request.GET[param_name]
elif param_name in request.POST:
method = request.POST[param_name]
elif header_name in request.headers:
method = request.headers[header_name]
else:
return # Not a tunneled request
if method in allowed_methods:
request.GET.pop(param_name, None)
request.POST.pop(param_name, None)
request.headers.pop(header_name, None)
request.method = method
else:
raise HTTPBadRequest(disallowed_message)
self.add_subscriber(new_request_subscriber, NewRequest)
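# Illustrative request (hypothetical endpoint): with tunneling enabled, a
# client limited to GET/POST can delete a member with either
#     POST /thing/1?$method=DELETE
# or a plain POST carrying the header X-HTTP-Method-Override: DELETE.
# Methods outside allowed_methods raise HTTPBadRequest before any view runs.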
| wylee/pyramid_restler | pyramid_restler/config.py | Python | mit | 4,625 |
"""Generates constants for use in blinkpy."""
import os
MAJOR_VERSION = 0
MINOR_VERSION = 19
PATCH_VERSION = "0.rc0"
__version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}.{PATCH_VERSION}"
REQUIRED_PYTHON_VER = (3, 6, 0)
PROJECT_NAME = "blinkpy"
PROJECT_PACKAGE_NAME = "blinkpy"
PROJECT_LICENSE = "MIT"
PROJECT_AUTHOR = "Kevin Fronczak"
PROJECT_COPYRIGHT = f" 2017, {PROJECT_AUTHOR}"
PROJECT_URL = "https://github.com/fronzbot/blinkpy"
PROJECT_EMAIL = "[email protected]"
PROJECT_DESCRIPTION = "A Blink camera Python library " "running on Python 3."
PROJECT_LONG_DESCRIPTION = (
"blinkpy is an open-source "
"unofficial API for the Blink Camera "
"system with the intention for easy "
"integration into various home "
"automation platforms."
)
if os.path.exists("README.rst"):
PROJECT_LONG_DESCRIPTION = open("README.rst").read()
PROJECT_CLASSIFIERS = [
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Home Automation",
]
PROJECT_GITHUB_USERNAME = "fronzbot"
PROJECT_GITHUB_REPOSITORY = "blinkpy"
PYPI_URL = f"https://pypi.python.org/pypi/{PROJECT_PACKAGE_NAME}"
"""
URLS
"""
BLINK_URL = "immedia-semi.com"
DEFAULT_URL = f"rest-prod.{BLINK_URL}"
BASE_URL = f"https://{DEFAULT_URL}"
LOGIN_ENDPOINT = f"{BASE_URL}/api/v5/account/login"
"""
Dictionaries
"""
ONLINE = {"online": True, "offline": False}
"""
OTHER
"""
DEFAULT_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
DEVICE_ID = "Blinkpy"
TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
DEFAULT_MOTION_INTERVAL = 1
DEFAULT_REFRESH = 30
MIN_THROTTLE_TIME = 2
SIZE_NOTIFICATION_KEY = 152
SIZE_UID = 16
TIMEOUT = 10
TIMEOUT_MEDIA = 90
| fronzbot/blinkpy | blinkpy/helpers/constants.py | Python | mit | 1,967 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient le module primaire supenr."""
import copy
import os
import pickle
import sys
import time
from yaml import dump, load
transforms = []
try:
from bson.errors import InvalidDocument
from bson.objectid import ObjectId
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from primaires.supenr.fraction import TransformFraction
transforms.append(TransformFraction())
except ImportError:
MongoClient = None
from abstraits.module import *
from abstraits.obase import *
from primaires.supenr import commandes
from primaires.supenr.config import cfg_supenr
# Directory in which the data files are saved.
# You can change this variable, or specify the option on
# the command line.
REP_ENRS = os.path.expanduser("~") + os.sep + "kassie"
class Module(BaseModule):
"""Classe du module 'supenr'.
Ce module gère l'enregistrement des données et leur récupération.
Les objets enregistrés doivent dériver indirectement de
abstraits.obase.BaseObj (voir abstraits/obase/__init__.py pour plus
d'informations).
Habituellement, il n'est pas nécessaire de manipuler directement
ce module.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "supenr", "primaire")
self.cfg = None
self.mode = "pickle"
self.logger = type(self.importeur).man_logs.creer_logger("supenr", \
"supenr")
self.enregistre_actuellement = False
self.fichiers = {}
self.pret = False
self.met_preparer = []
        # Helper objects for MongoDB
self.mongo_db = None
self.mongo_file = set()
self.mongo_collections = {}
self.mongo_objets = {}
self.mongo_debug = False
def config(self):
"""Configuration du module.
On se base sur parser_cmd pour savoir si un dossier d'enregistrement
des fichiers-données a été défini.
Cette donnée peut également se trouver dans les données globales de
configuration.
Une fois qu'on a obtenu cette donnée, on charge les fichiers
yml qui servent de complément à l'enregistrement.
"""
global REP_ENRS
self.cfg = importeur.anaconf.get_config("supenr",
"supenr/supenr.cfg", "module d'enregistrement", cfg_supenr)
self.mode = self.cfg.mode
parser_cmd = type(self.importeur).parser_cmd
config_globale = type(self.importeur).anaconf.get_config("globale")
        # If the save mode is mongo
if self.mode == "mongo":
self.config_mongo()
        # If the save mode is pickle
if self.mode == "pickle":
if config_globale.chemin_enregistrement:
REP_ENRS = config_globale.chemin_enregistrement
if "chemin-enregistrement" in parser_cmd.keys():
REP_ENRS = parser_cmd["chemin-enregistrement"]
            # Create the directory if it does not exist
if not os.path.exists(REP_ENRS):
os.makedirs(REP_ENRS)
elif self.mode != "mongo":
self.logger.fatal("Mode d'enregistrement {} inconnu.".format(
                    repr(self.mode)))
sys.exit(1)
        # Increase the recursion limit
sys.setrecursionlimit(20000)
self.pret = True
        # Load the yml files
if importeur.sauvegarde:
for nom_fichier in os.listdir(REP_ENRS):
if nom_fichier.endswith(".yml"):
fichier = open(REP_ENRS + os.sep + nom_fichier, "r")
contenu = fichier.read()
donnees = load(contenu)
nom = nom_fichier[:-4]
self.fichiers[nom] = donnees
self.logger.info("Chargement du fichier YML {}".format(
repr(nom)))
BaseModule.config(self)
def config_mongo(self):
"""Configuration spécifique de MongoDB."""
if MongoClient is None:
self.logger.warning("Impossible de charger pymongo, " \
"retour au mode d'enregistrement 'pickle'")
self.mode = "pickle"
else:
            # Try to connect
try:
connexion = MongoClient()
except ConnectionFailure:
self.logger.warning("Impossible de se connecter au " \
"serveur MongoDB. Retour sur le mode 'pickle'")
self.mode = "pickle"
else:
self.mongo_db = connexion[self.cfg.nom_mongodb]
#self.mongo_db.add_son_manipulator(TransformFraction())
def init(self):
"""Chargement de tous les objets (pickle)."""
if self.mode == "pickle":
self.charger()
else: # Mongo
importeur.diffact.ajouter_action("enregistrement", 1,
self.mongo_enregistrer_file)
BaseModule.init(self)
def ajouter_commandes(self):
"""Ajoute les commandes à l'interpréteur."""
self.commandes = [
commandes.enregistrer.CmdEnregistrer(),
commandes.mongo.CmdMongo(),
]
for cmd in self.commandes:
importeur.interpreteur.ajouter_commande(cmd)
def preparer(self):
"""Appel des méthodes différées."""
for liste in self.met_preparer:
callback = liste[0]
arguments = liste[1:]
callback(*arguments)
def detruire(self):
"""Destruction du module"""
if self.mode == "pickle":
self.enregistrer()
else:
self.mongo_enregistrer_file(False)
BaseModule.detruire(self)
def sauver_fichier(self, nom, donnees):
"""Sauvegarde le fichier XML précisé.
Le fichier sera le nom avec l'extension '.yml'. Les
données doivent être transmises dans un dictionnaire.
"""
if not importeur.sauvegarde:
return
chemin = REP_ENRS + os.sep + nom + ".yml"
contenu = dump(donnees, default_flow_style=False)
fichier = open(chemin, "w")
fichier.write(contenu)
fichier.close()
def enregistrer(self):
"""Méthode appelée pour enregistrer TOUS les objets par pickle."""
if not importeur.sauvegarde:
return
global REP_ENRS
if not self.pret:
raise RuntimeError("le supenr n'est pas prêt à enregistrer")
if self.enregistre_actuellement:
return
a_enregistrer = [o for o in objets.values() if o.e_existe]
self.logger.info("{} objets, dans {}o sont prêts à être " \
"enregistrés.".format(str(len(a_enregistrer)),
str(len(pickle.dumps(a_enregistrer)))))
self.enregistre_actuellement = True
chemin_dest = REP_ENRS + os.sep + "enregistrements.bin"
        # Try to open the file
try:
fichier_enr = open(chemin_dest, 'wb')
except IOError as io_err:
self.logger.warning("Le fichier {} destiné à enregistrer " \
"les objets de Kassie n'a pas pu être ouvert : {}".format(
chemin_dest, io_err))
else:
pickler = pickle.Pickler(fichier_enr)
pickler.dump(a_enregistrer)
finally:
if "fichier_enr" in locals():
fichier_enr.close()
self.enregistre_actuellement = False
for classe, liste in objets_par_type.items():
liste = [o for o in liste if o.e_existe]
def enregistrer_periodiquement(self):
"""Cette méthode est appelée périodiquement pour enregistrer (pickle).
On enregistre tous les objets dans la sauvegarde pickle.
"""
importeur.diffact.ajouter_action("enregistrement", 60 * 60,
self.enregistrer_periodiquement)
if self.enregistre_actuellement:
return
t1 = time.time()
self.enregistrer()
t2 = time.time()
print("Enregistrement fait en", t2 - t1)
def charger(self):
"""Charge le fichier indiqué et retourne l'objet dépicklé"""
if not importeur.sauvegarde:
return
global REP_ENRS
chemin_dest = REP_ENRS + os.sep + "enregistrements.bin"
try:
fichier_enr = open(chemin_dest, 'rb')
except IOError as io_err:
self.logger.warning("Le fichier {} n'a pas pu être ouvert " \
": {}".format(chemin_dest, io_err))
else:
unpickler = pickle.Unpickler(fichier_enr)
try:
rec = unpickler.load()
except (EOFError, pickle.UnpicklingError):
self.logger.warning("Le fichier {} n'a pas pu être " \
"chargé ".format(chemin_dest))
finally:
if "fichier_enr" in locals():
fichier_enr.close()
self.logger.info("{} objets récupérés".format(len(objets)))
def ajouter_objet(self, objet):
"""Ajoute les objets à la file des enregistrements."""
if self.mode == "mongo":
self.mongo_file.add(objet)
def detruire_objet(self, objet):
"""Détruit l'objet."""
if self.mode == "mongo":
nom = self.qualname(type(objet))
if "_id" in objet.__dict__:
self.mongo_db[nom].remove(objet._id)
def charger_groupe(self, groupe):
"""Cette fonction retourne les objets d'un groupe.
Le mode 'pickle' se base sur objets_par_type. Le mode 'mongo'
récupère les collections et les fusionne (il peut y avoir
plusieurs collections pour un seul groupe. Un groupe étant
une classe, ses classes héritées sont également chargées.
"""
if not importeur.sauvegarde:
return []
if not self.pret:
raise RuntimeError("le supenr n'est pas prêt à charger un groupe")
objets = []
if self.mode == "pickle":
for cls in objets_par_type.keys():
if issubclass(cls, groupe):
objets.extend(objets_par_type[cls])
else:
for cls in classes_base.values():
if issubclass(cls, groupe):
objets.extend(self.mongo_charger_collection(cls))
return objets
def charger_unique(self, groupe):
"""Cette fonction retourne l'objet unique correspondant.
Si plusieurs objets uniques semblent exister du même type, retourne
le premier à avoir été chargé.
"""
if not importeur.sauvegarde:
return None
if not self.pret:
raise RuntimeError("le supenr n'est pas prêt à charger un groupe")
if self.mode == "pickle":
return objets_par_type.get(groupe, [None])[0]
else:
objets = self.charger_groupe(groupe)
if len(objets) == 0:
return None
elif len(objets) > 1:
print("Plus d'un objet unique du même type: {}".format(objets))
return objets[0]
def mongo_charger_collection(self, classe):
"""Charge la collection correspondante.
Les objets chargés sont retournés sous la forme d'une liste.
"""
nom = self.qualname(classe)
objets = []
valeurs = self.mongo_db[nom].find()
for attributs in valeurs:
_id = attributs["_id"]
objet = self.mongo_charger_objet(classe, _id)
objets.append(objet)
return objets
def mongo_charger_objet(self, classe, _id):
"""Récupère un objet individuel.
Cette méthode va :
Interroger les objets déjà chargés
Ou charger l'objet depuis MongoDB
"""
nom = self.qualname(classe)
charges = self.mongo_objets.get(nom, {})
if _id in charges:
return charges[_id]
collection = self.mongo_db[nom]
objet = classe(*classe.__getnewargs__(classe))
enr = self.mongo_objets.get(nom, {})
enr[_id] = objet
self.mongo_objets[nom] = enr
        # Process the attributes
attributs = collection.find_one(_id)
if attributs is None:
del enr["_d"]
return None
self.mongo_charger_dictionnaire(attributs)
for transform in transforms:
transform.transform_outgoing(attributs, collection)
objet.__setstate__(attributs)
return objet
def mongo_charger_dictionnaire(self, dictionnaire):
"""Charge les informations d'un dictionnaire."""
for cle, valeur in tuple(dictionnaire.items()):
if isinstance(valeur, list) and len(valeur) == 2 and \
isinstance(valeur[0], str) and isinstance(valeur[1],
ObjectId):
classe = classes_base[valeur[0]]
objet = self.mongo_charger_objet(classe, valeur[1])
dictionnaire[cle] = objet
elif isinstance(valeur, list):
self.mongo_charger_liste(valeur)
elif isinstance(valeur, dict):
self.mongo_charger_dictionnaire(valeur)
def mongo_charger_liste(self, liste):
"""Charge la liste."""
copie = []
for valeur in liste:
if isinstance(valeur, list) and len(valeur) == 2 and \
isinstance(valeur[0], str) and isinstance(valeur[1],
ObjectId):
classe = classes_base[valeur[0]]
objet = self.mongo_charger_objet(classe, valeur[1])
copie.append(objet)
elif isinstance(valeur, list):
self.mongo_charger_liste(valeur)
copie.append(valeur)
elif isinstance(valeur, dict):
self.mongo_charger_dictionnaire(valeur)
copie.append(valeur)
else:
copie.append(valeur)
liste[:] = copie
def mongo_enregistrer_file(self, rappel=True, debug=False):
"""Enregistre la file des objets (mongo).
Les objets à enregistrer sont soit à insérer, soit à
modifier.
"""
if not debug:
debug = self.mongo_debug
if rappel:
importeur.diffact.ajouter_action("enregistrement", 10,
self.mongo_enregistrer_file)
if debug:
print("Premier passage")
t1 = time.time()
reste = []
for objet in self.mongo_file:
second, attributs = self.extraire_attributs(objet)
if debug:
print(" ", type(objet), attributs)
self.mongo_enregistrer_objet(objet, attributs)
if second:
reste.append(objet)
if debug:
print("Second passage")
for objet in reste:
second, attributs = self.extraire_attributs(objet)
if debug:
print(" ", type(objet), attributs)
self.mongo_enregistrer_objet(objet, attributs)
self.mongo_file.clear()
t2 = time.time()
self.mongo_debug = False
return t2 - t1
def mongo_enregistrer_objet(self, objet, attributs):
"""Enregistre l'objet dans une base MongoDB.
Si l'objet n'existe pas, l'insert. Sinon le met à jour.
"""
nom = self.qualname(type(objet))
collection = self.mongo_db[nom]
for transform in transforms:
transform.transform_incoming(attributs, collection)
if "_id" in attributs: # L'objet existe
_id = attributs.pop("_id")
try:
collection.update({"_id": _id}, attributs)
except InvalidDocument as err:
print(err, objet, type(objet), attributs)
sys.exit(1)
else:
try:
_id = collection.insert(attributs)
except InvalidDocument as err:
print(err, objet, type(objet), attributs)
sys.exit(1)
objet._id = _id
enr = self.mongo_objets.get(nom, {})
enr[_id] = objet
self.mongo_objets[nom] = enr
def extraire_attributs(self, objet):
"""Méthode utilisée par MongoDB pour extraire les attributs d'un objet.
On s'inspire de objet.__dict__ pour lister les attributs
et leur valeur respective. Cependant, on retourne second.
Le booléen second est utilisé quand l'un des attributs de
l'objet fait référence à un autre objet BaseObj. Dans ce cas,
on enregistre le nom de sa collection et son ObjetID.
Cependant, si l'objet cible n'a pas encore été enregistré,
il n'a pas d'_id. Il faut donc enregistrer le premier objet
sans cet attribut, puis enregistrer le second, puis réenregistrer
le premier qui cette fois peut référencer le second en attribut.
"""
if isinstance(objet, dict):
attributs = objet
else:
attributs = dict(objet.__getstate__())
second = False
for cle, valeur in tuple(attributs.items()):
if isinstance(valeur, BaseObj):
if "_id" in valeur.__dict__:
attributs[cle] = (self.qualname(type(valeur)),
valeur._id)
else:
del attributs[cle]
second = True
elif isinstance(valeur, list):
attributs[cle] = valeur = list(valeur)
sous = self.extraire_liste(valeur)
if sous:
second = True
elif isinstance(valeur, dict):
attributs[cle] = valeur = dict(valeur)
sous, r = self.extraire_attributs(valeur)
if sous:
second = True
return second, attributs
def extraire_liste(self, liste):
"""Extrait les valeurs de la liste."""
copie = []
second = False
for valeur in liste:
if isinstance(valeur, BaseObj):
if "_id" in valeur.__dict__:
valeur = (self.qualname(type(valeur)), valeur._id)
else:
second = True
continue
elif isinstance(valeur, list):
valeur = list(valeur)
sous = self.extraire_liste(valeur)
if sous:
second = True
continue
elif isinstance(valeur, dict):
valeur = dict(valeur)
sous = self.extraire_attributs(valeur)
if sous:
second = True
continue
copie.append(valeur)
liste[:] = copie
return second
@staticmethod
def qualname(classe):
return classe.__module__ + "." + classe.__name__
| vlegoff/tsunami | src/primaires/supenr/__init__.py | Python | bsd-3-clause | 20,738 |
import os
import logging
class Config(object):
# environment
DEBUG = False
TESTING = False
PRODUCTION = False
# log
LOG_LEVEL = logging.DEBUG
SYS_ADMINS = ['[email protected]']
SITE_NAME = 'Shiritori'
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
# form
WTF_CSRF_ENABLED = True
SECRET_KEY = 'Impossible-to-guess-secret-code-that-you-will-never-guess!!!'
PRODUCTS_PER_PAGE = 20
# email server
DEFAULT_MAIL_SENDER = 'Admin < [email protected] >'
MAIL_SERVER = 'mailtrap.io'
MAIL_PORT = 2525
MAIL_USE_SSL = False
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
# security
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = SECRET_KEY
SECURITY_REGISTERABLE = True
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = True
SECURITY_TRACKABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| Rassilion/shiritori | website/config.py | Python | mit | 1,025 |
from .mockups import *
| polyrabbit/WeCron | WeCron/common/tests/__init__.py | Python | gpl-3.0 | 23 |
# Posterior for z
# Author: Aleyna Kara
# This file is translated from sensorFusionUnknownPrec.m
import superimport
import pyprobml_utils as pml
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
xs, ys = [1.1, 1.9], [2.9, 4.1]
nx, ny = len(xs), len(ys)
xbar = np.mean(xs)
ybar = np.mean(ys)
sx = np.sum((xs - xbar)**2)/nx
sy = np.sum((ys - ybar)**2)/ny
# MLE
lamx, lamy = 1/sx, 1/sy
post_prec = (nx * lamx + ny*lamy)
theta = (xbar * nx * lamx + ybar * ny * lamy) / post_prec
post_var = 1/post_prec
# run the fixed-point iterations
for _ in range(10):
lamx = nx/np.sum((xs - theta)**2)
lamy = ny/np.sum((ys - theta)**2)
    theta = (xbar * nx * lamx + ybar * ny * lamy) / (nx * lamx + ny * lamy)
post_var = 1/(nx * lamx + ny * lamy)
start, end, n = -2, 6, 81
grid_theta = np.linspace(start, end, n)
plt.plot(grid_theta, multivariate_normal.pdf(grid_theta, mean=theta, cov=np.sqrt(post_var)), 'b')
plt.xlim([start, end])
plt.ylim(bottom=0)
pml.savefig('sensorFusion2Gauss.pdf', dpi=300)
plt.show()
# Bayesian analysis
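# Sketch of the reasoning behind the form used below: integrating each
# sensor's precision out under a vague prior leaves a marginal likelihood
# proportional to [ (theta - xbar)**2 + sx ] ** (-nx/2) (and similarly for y);
# with nx = ny = 2 the exponents are -1, so the unnormalised posterior is
# simply (1/fx) * (1/fy).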
fx = (grid_theta - xbar)**2 + sx
fy = (grid_theta - ybar)**2 + sy
post = (1/fx) * (1/fy)
plt.plot(grid_theta, post, 'b')
plt.xlim([start, end])
plt.ylim(bottom=0)
pml.savefig('sensorFusion2Nongauss.pdf', dpi=300)
plt.show()
 | probml/pyprobml | scripts/sensor_fusion_unknown_prec.py | Python | mit | 1,301 |
import os
import platform
import tempfile
import unittest
import mock
import numpy
import six
from chainer import cuda
from chainer import functions
from chainer import links
from chainer import sequential
from chainer import testing
from chainer.testing import attr
from chainer import variable
class TestSequential(unittest.TestCase):
def setUp(self):
self.l1 = links.Linear(None, 3)
self.l2 = links.Linear(3, 2)
self.l3 = links.Linear(2, 3)
# s1: l1 -> l2
self.s1 = sequential.Sequential(self.l1)
self.s1.append(self.l2)
# s2: s1 (l1 -> l2) -> l3
self.s2 = sequential.Sequential(self.s1)
self.s2.append(self.l3)
def test_init(self):
self.assertIs(self.s1[0], self.l1)
self.assertEqual(self.l1.name, '0')
self.assertIs(self.s2[0], self.s1)
self.assertEqual(self.s1.name, '0')
with self.assertRaises(ValueError):
sequential.Sequential(0)
def test_append(self):
self.assertIs(self.s2[1], self.l3)
self.assertEqual(self.l2.name, '1')
def test_iter(self):
links = list(self.s2)
self.assertEqual(2, len(links))
self.assertIs(links[0], self.s1)
self.assertIs(links[1], self.l3)
def test_len(self):
self.assertIs(len(self.s1), 2)
self.assertIs(len(self.s2), 2)
def test_copy(self):
s2 = self.s2.copy()
self.assertIs(s2.name, None)
self.assertIsInstance(s2._children, list)
self.assertIsNot(s2[0], self.s1)
self.assertEqual(s2[0].name, '0')
self.assertIsInstance(s2[0]._children, list)
self.assertIsNot(s2[0][0], self.l1)
self.assertEqual(s2[0][0].name, '0')
self.assertIsNot(s2[0][0].b, self.l1.b)
self.assertIs(s2[0][0].b.data, self.l1.b.data)
self.assertIs(s2[0][0].b.grad, None)
self.assertIsNot(s2[0][1], self.l2)
self.assertEqual(s2[0][1].name, '1')
self.assertIsNot(s2[0][1].W, self.l2.W)
self.assertIs(s2[0][1].W.data, self.l2.W.data)
self.assertIs(s2[0][1].W.grad, None)
self.assertIsNot(s2[1], self.l3)
self.assertEqual(s2[1].name, '1')
self.assertIsNot(s2[1].W, self.l3.W)
self.assertIs(s2[1].W.data, self.l3.W.data)
self.assertIs(s2[1].W.grad, None)
@attr.gpu
def test_copy_and_send_to_gpu(self):
s2 = self.s2.copy()
self.s2.to_gpu()
self.assertIsInstance(self.s2[0][0].b.data, cuda.cupy.ndarray)
self.assertIsInstance(self.s2[0][1].W.data, cuda.cupy.ndarray)
self.assertIsInstance(s2[0][0].b.data, numpy.ndarray)
self.assertIsInstance(s2[0][1].W.data, numpy.ndarray)
@attr.gpu
def test_copy_and_send_to_gpu_2(self):
s2 = self.s2.copy()
s2.to_gpu()
self.assertIsInstance(self.s2[0][0].b.data, numpy.ndarray)
self.assertIsInstance(self.s2[0][1].W.data, numpy.ndarray)
self.assertIsInstance(s2[0][0].b.data, cuda.cupy.ndarray)
self.assertIsInstance(s2[0][1].W.data, cuda.cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_send_to_gpu_multi(self):
s2 = self.s2.copy()
self.s2.to_gpu(0)
s2.to_gpu(1)
self.assertEqual(self.s2[0][0].b.data.device.id, 0)
self.assertEqual(self.s2[0][1].W.data.device.id, 0)
self.assertEqual(s2[0][0].b.data.device.id, 1)
self.assertEqual(s2[0][1].W.data.device.id, 1)
def test_to_cpu_on_cpu(self):
x1 = self.l1.b.data
gx1 = self.l1.b.grad
x2 = self.l2.W.data
gx2 = self.l2.W.grad
x3 = self.l3.W.data
gx3 = self.l3.W.grad
self.s2.to_cpu()
self.assertIs(self.l1.b.data, x1)
self.assertIs(self.l1.b.grad, gx1)
self.assertIs(self.l2.W.data, x2)
self.assertIs(self.l2.W.grad, gx2)
self.assertIs(self.l3.W.data, x3)
self.assertIs(self.l3.W.grad, gx3)
@attr.gpu
def test_to_cpu(self):
self.s2.to_gpu()
self.s2.to_cpu()
self.assertIs(self.s2.xp, numpy)
self.assertIs(self.s1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.b.data, numpy.ndarray)
self.assertIsInstance(self.l1.b.grad, numpy.ndarray)
self.assertIsInstance(self.l2.W.data, numpy.ndarray)
self.assertIsInstance(self.l2.W.grad, numpy.ndarray)
self.assertIsInstance(self.l3.W.data, numpy.ndarray)
self.assertIsInstance(self.l3.W.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
self.s2.to_gpu()
self.assertIs(self.s2.xp, cupy)
self.assertIs(self.s1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.b.data, cupy.ndarray)
self.assertIsInstance(self.l1.b.grad, cupy.ndarray)
self.assertIsInstance(self.l2.W.data, cupy.ndarray)
self.assertIsInstance(self.l2.W.grad, cupy.ndarray)
self.assertIsInstance(self.l3.W.data, cupy.ndarray)
self.assertIsInstance(self.l3.W.grad, cupy.ndarray)
def test_params(self):
params = list(self.s2.params())
self.assertEqual({id(p) for p in params},
{id(self.l1.W), id(self.l1.b),
id(self.l2.W), id(self.l2.b),
id(self.l3.W), id(self.l3.b)})
def test_params_skip_uninit(self):
params = list(self.s2.params(include_uninit=False))
self.assertEqual({id(p) for p in params},
{id(self.l1.b), id(self.l2.W), id(self.l2.b),
id(self.l3.W), id(self.l3.b)})
def test_namedparams(self):
namedparams = list(self.s2.namedparams())
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/W', id(self.l1.W)),
('/0/0/b', id(self.l1.b)),
('/0/1/W', id(self.l2.W)),
('/0/1/b', id(self.l2.b)),
('/1/W', id(self.l3.W)),
('/1/b', id(self.l3.b))})
def test_namedparams_skip_uninit(self):
namedparams = list(self.s2.namedparams(include_uninit=False))
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/b', id(self.l1.b)),
('/0/1/W', id(self.l2.W)),
('/0/1/b', id(self.l2.b)),
('/1/W', id(self.l3.W)),
('/1/b', id(self.l3.b))})
def test_links(self):
links = list(self.s2.links())
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3,
self.s1, self.s2]})
def test_links_skipself(self):
links = list(self.s2.links(skipself=True))
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3, self.s1]})
def test_namedlinks(self):
namedlinks = list(self.s2.namedlinks())
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/', id(self.s2)),
('/0', id(self.s1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_namedlinks_skipself(self):
namedlinks = list(self.s2.namedlinks(skipself=True))
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/0', id(self.s1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_children(self):
self.assertEqual(tuple(id(c) for c in self.s2.children()),
(id(self.s1), id(self.l3)))
self.assertEqual(tuple(id(c) for c in self.s1.children()),
(id(self.l1), id(self.l2)))
def test_copyparams(self):
l1 = links.Linear(None, 3)
l2 = links.Linear(3, 2)
l3 = links.Linear(2, 3)
s1 = sequential.Sequential(l1, l2)
s2 = sequential.Sequential(s1, l3)
l1.b.data.fill(0)
l2.W.data.fill(1)
l2.b.data.fill(2)
l3.W.data.fill(3)
l3.b.data.fill(4)
self.s2.copyparams(s2)
numpy.testing.assert_array_equal(self.l1.b.data, l1.b.data)
numpy.testing.assert_array_equal(self.l2.W.data, l2.W.data)
numpy.testing.assert_array_equal(self.l2.b.data, l2.b.data)
numpy.testing.assert_array_equal(self.l3.W.data, l3.W.data)
numpy.testing.assert_array_equal(self.l3.b.data, l3.b.data)
def test_zerograds(self):
with testing.assert_warns(DeprecationWarning):
self.s2.zerograds()
numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
numpy.testing.assert_array_equal(
self.l2.W.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(
self.l3.W.grad, numpy.zeros((3, 2)))
self.l1.W.initialize((3, 2))
numpy.testing.assert_array_equal(
self.l1.W.grad, numpy.zeros((3, 2)))
def test_cleargrads(self):
self.s2.cleargrads()
self.assertIsNone(self.l1.b.grad)
self.assertIsNone(self.l2.W.grad)
self.assertIsNone(self.l2.b.grad)
self.assertIsNone(self.l3.W.grad)
self.assertIsNone(self.l3.b.grad)
self.l1.W.initialize((2, 3))
self.assertIsNone(self.l1.W.grad)
def test_addgrads(self):
l1 = links.Linear(2, 3)
l2 = links.Linear(3, 2)
l3 = links.Linear(2, 3)
s1 = sequential.Sequential(l1, l2)
s2 = sequential.Sequential(s1, l3)
l1.b.grad.fill(1)
l2.W.grad.fill(2)
l2.b.grad.fill(3)
l3.W.grad.fill(4)
l3.b.grad.fill(5)
l1.W.grad.fill(6)
self.l1.b.grad.fill(-1)
self.l2.W.grad.fill(-2)
self.l2.b.grad.fill(-3)
self.l3.W.grad.fill(-4)
self.l3.b.grad.fill(-5)
self.l1.W.cleargrad()
self.s2.addgrads(s2)
numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
numpy.testing.assert_array_equal(self.l1.W.grad, l1.W.grad)
numpy.testing.assert_array_equal(self.l2.W.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.b.grad, numpy.zeros((2,)))
numpy.testing.assert_array_equal(self.l3.W.grad, numpy.zeros((3, 2)))
numpy.testing.assert_array_equal(self.l3.b.grad, numpy.zeros((3,)))
def test_serialize(self):
l1 = links.Linear(None, 1)
l2 = links.Linear(None, 3)
with l2.init_scope():
l2.x = variable.Parameter(0, 2)
s1 = sequential.Sequential(l1, l2)
mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
serializer.return_value = None
mocks['0'].return_value = None
mocks['1'].return_value = None
s1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('0')
serializer.__getitem__.assert_any_call('1')
mocks['0'].assert_any_call('W', None)
mocks['0'].assert_any_call('b', l1.b.data)
mocks['1'].assert_any_call('W', None)
mocks['1'].assert_any_call('b', l2.b.data)
mocks['1'].assert_any_call('x', l2.x.data)
def test_getitem(self):
self.assertIs(self.s1[0], self.l1)
def test_delitem(self):
del self.s1[0]
self.assertIsNot(self.s1[0], self.l1)
self.assertIs(self.s1[0], self.l2)
def test_reversed(self):
layers = list(reversed(self.s2))
self.assertIs(layers[0], self.l3)
self.assertIs(layers[1], self.s1)
def test_contains(self):
self.assertTrue(self.l1 in self.s1)
self.assertTrue(self.l2 in self.s1)
self.assertTrue(self.s1 in self.s2)
self.assertTrue(self.l3 in self.s2)
self.assertFalse(self.l3 in self.s1)
self.assertFalse(self.l2 in self.s2)
def test_add(self):
l1 = links.Linear(3, 2)
l2 = functions.relu
other = sequential.Sequential(l1, l2)
added = self.s2 + other
self.assertEqual(len(added), 4)
self.assertIs(added[0], self.s1)
self.assertIs(added[1], self.l3)
self.assertIs(added[2], l1)
self.assertIs(added[3], l2)
with self.assertRaises(ValueError):
self.s2 + 0
def test_iadd(self):
l4 = links.Linear(3, 1)
self.s2 += sequential.Sequential(l4)
self.assertIs(self.s2[0], self.s1)
self.assertIs(self.s2[1], self.l3)
self.assertIs(self.s2[2], l4)
with self.assertRaises(ValueError):
self.s2 += 0
def test_call(self):
l1 = mock.MagicMock()
l2 = mock.MagicMock()
l3 = mock.MagicMock()
model = sequential.Sequential(l1, l2, l3)
x = numpy.arange(2).reshape(1, 2).astype('f')
y = model(x)
l1.assert_called_once()
l2.assert_called_once()
l3.assert_called_once()
y = self.s1(x)
self.assertIs(y.creator.inputs[1].data, self.l2.W.data)
def test_call_with_multiple_inputs(self):
model = sequential.Sequential(
lambda x, y: (x * 2, y * 3, x + y),
lambda x, y, z: x + y + z
)
y = model(2, 3)
self.assertEqual(y, 18)
def test_extend(self):
l1 = links.Linear(3, 2)
l2 = links.Linear(2, 3)
s3 = sequential.Sequential(l1, l2)
self.s2.extend(s3)
self.assertEqual(len(self.s2), 4)
self.assertIs(self.s2[2], s3[0])
self.assertIs(self.s2[3], s3[1])
def test_insert(self):
l1 = links.Linear(3, 3)
self.s1.insert(1, l1)
self.assertEqual(len(self.s1), 3)
self.assertIs(self.s1[1], l1)
def test_remove(self):
self.s2.remove(self.s1)
self.assertEqual(len(self.s2), 1)
self.assertIs(self.s2[0], self.l3)
def test_remove_by_layer_type(self):
self.s2.insert(2, functions.relu)
self.s2.remove_by_layer_type('Linear')
self.assertEqual(len(self.s2), 2)
self.assertIs(self.s2[0], self.s1)
self.assertIs(self.s2[1], functions.relu)
def test_pop(self):
l3 = self.s2.pop(1)
self.assertIs(l3, self.l3)
self.assertEqual(len(self.s2), 1)
def test_clear(self):
self.s2.clear()
self.assertEqual(len(self.s2), 0)
def test_index(self):
self.assertEqual(self.s2.index(self.s1), 0)
self.assertEqual(self.s2.index(self.l3), 1)
def test_count(self):
self.s2.insert(1, functions.relu)
self.s2.insert(3, functions.relu)
self.assertEqual(self.s2.count(functions.relu), 2)
self.assertEqual(self.s2.count(self.s1), 1)
self.assertEqual(self.s2.count(self.l3), 1)
self.s2.append(self.l3)
self.assertEqual(self.s2.count(self.l3), 2)
def test_count_by_layer_type(self):
self.assertEqual(self.s2.count_by_layer_type('Linear'), 1)
self.s2.insert(1, functions.relu)
self.s2.insert(3, functions.relu)
self.assertEqual(self.s2.count_by_layer_type('relu'), 2)
def test_pickle_without_lambda(self):
fd, path = tempfile.mkstemp()
six.moves.cPickle.dump(self.s2, open(path, 'wb'))
s2 = six.moves.cPickle.load(open(path, 'rb'))
self.assertEqual(len(s2), len(self.s2))
numpy.testing.assert_array_equal(s2[0][0].b.data, self.s2[0][0].b.data)
numpy.testing.assert_array_equal(s2[0][1].W.data, self.s2[0][1].W.data)
numpy.testing.assert_array_equal(s2[0][1].b.data, self.s2[0][1].b.data)
numpy.testing.assert_array_equal(s2[1].W.data, self.s2[1].W.data)
numpy.testing.assert_array_equal(s2[1].b.data, self.s2[1].b.data)
for l1, l2 in zip(s2, self.s2):
self.assertIsNot(l1, l2)
os.close(fd)
os.remove(path)
def test_pickle_with_lambda(self):
self.s2.append(lambda x: x)
with self.assertRaises(ValueError):
with tempfile.TemporaryFile() as fp:
six.moves.cPickle.dump(self.s2, fp)
def test_repr(self):
bits, pl = platform.architecture()
self.assertEqual(
str(self.s1),
'0\tLinear\tW(None)\tb{}\t\n'
'1\tLinear\tW{}\tb{}\t\n'.format(
self.s1[0].b.shape, self.s1[1].W.shape, self.s1[1].b.shape))
def test_repeat_with_init(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2)
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.array, ret[1][0][0].b.array)
# W is initialized randomly, so they should be different
self.assertFalse(
numpy.array_equal(ret[0][1].W.array, self.l3.W.array))
# And the object should also be different
self.assertIsNot(ret[0][1].W.array, self.l3.W.array)
# Repeated elements should be different objects
self.assertIsNot(ret[0], ret[1])
# Also for the arrays
self.assertIsNot(ret[0][1].W.array, ret[1][1].W.array)
# And values should be different
self.assertFalse(
numpy.array_equal(ret[0][1].W.array, ret[1][1].W.array))
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='init')
self.assertEqual(len(ret), 0)
def test_repeat_with_copy(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2, mode='copy')
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
self.assertIsNot(ret[0], ret[1])
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.array, ret[1][0][0].b.array)
# W is shallowy copied, so the values should be same
numpy.testing.assert_array_equal(ret[0][1].W.array, self.l3.W.array)
# But the object should be different
self.assertIsNot(ret[0][1].W.array, self.l3.W.array)
# Repeated elements should be different objects
self.assertIsNot(ret[0][0], ret[1][0])
# Also for the arrays
self.assertIsNot(ret[0][1].W.array, ret[1][1].W.array)
# But the values should be same
numpy.testing.assert_array_equal(ret[0][1].W.array, ret[1][1].W.array)
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='copy')
self.assertEqual(len(ret), 0)
def test_repeat_with_share(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2, mode='share')
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.data, ret[1][0][0].b.data)
# W is shallowy copied, so the values should be same
numpy.testing.assert_array_equal(ret[0][1].W.array, self.l3.W.array)
numpy.testing.assert_array_equal(ret[1][1].W.array, self.l3.W.array)
# And the object should also be same
self.assertIs(ret[0][1].W.array, self.l3.W.array)
self.assertIs(ret[1][1].W.array, self.l3.W.array)
# Repeated element itself should be different
self.assertIsNot(ret[0], ret[1])
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='share')
self.assertEqual(len(ret), 0)
def test_flatten(self):
flattened_s2 = self.s2.flatten()
self.assertIs(flattened_s2[0], self.l1)
self.assertIs(flattened_s2[1], self.l2)
self.assertIs(flattened_s2[2], self.l3)
testing.run_module(__name__, __file__)
| rezoo/chainer | tests/chainer_tests/test_sequential.py | Python | mit | 20,769 |
#!/usr/bin/env python
import freenect
import cv
from misc.demo import frame_convert
cv.NamedWindow('Depth')
cv.NamedWindow('RGB')
keep_running = True
def display_depth(dev, data, timestamp):
global keep_running
cv.ShowImage('Depth', frame_convert.pretty_depth_cv(data))
if cv.WaitKey(10) == 27:
keep_running = False
def display_rgb(dev, data, timestamp):
global keep_running
cv.ShowImage('RGB', frame_convert.video_cv(data))
if cv.WaitKey(10) == 27:
keep_running = False
def body(*args):
if not keep_running:
raise freenect.Kill
print('Press ESC in window to stop')
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
| Dining-Engineers/left-luggage-detection | misc/demo/demo_cv_async.py | Python | gpl-2.0 | 732 |
from launcher.utils.imports import import_submodules
import_submodules(globals(),__name__,__path__)
| hikelee/launcher | launcher/views/mixins/__init__.py | Python | mit | 101 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
#url(r'^$', TemplateView.as_view(template_name='base.html')),
#url(r'^$', TemplateView.as_view(template_name='home.html')),
url(r'^$', 'newrelic_python_kata.views.home', name='home'),
url(r'^kata1/$', 'employees.views.list', name='list'),
url(r'^kata2/$', 'employees.views.filtering', name='query'),
url(r'^kata3/$', 'factorial.views.factorial_h', name='factorial'),
url(r'^kata4/$', 'weather.views.weather', name='weather'),
# Examples:
# url(r'^newrelic_python_kata/', include('newrelic_python_kata.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG is False: #if DEBUG is True it will be served automatically
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
)
| newrelic/newrelic-python-kata | newrelic_python_kata/urls.py | Python | mit | 1,300 |
import time, os, sys, math
from string import split, join
from altitude import decode_alt
import cpr
import exceptions
def charmap(d):
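    # 6-bit character set used in ADS-B identification messages:
    # 1-26 -> 'A'-'Z', 48-57 -> '0'-'9', 32 -> space; any other value
    # is rendered as a space.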
if d > 0 and d < 27:
retval = chr(ord("A")+d-1)
elif d == 32:
retval = " "
elif d > 47 and d < 58:
retval = chr(ord("0")+d-48)
else:
retval = " "
return retval
def decodeHeader(header):
icao24 = header & 0xFFFFFF
capability = (header >> 24) & 0x7
return [icao24, capability]
def decodeIdentification(msg):
frmt = (msg >> 51) & 0x1F
catset = ['A', 'B', 'C', 'D'][4-frmt]
emitter_category = (msg >> 48) & 0x7 # see Doc 9871 Table A-2-8
callsign = ""
for i in range(0, 8):
callsign += charmap( msg >> (42-6*i) & 0x3F)
return [catset, emitter_category, callsign]
def parseAirbornePosition(icao24, msg, receiver_pos, decoder, ts):
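    # Latitude/longitude are CPR-encoded 17-bit fields; the shared CPR
    # decoder uses the receiver position and the even/odd frame flag to
    # recover an unambiguous position.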
surveillance_status = (msg >> 49) & 0x3
nic_suppl = bool((msg >> 48) & 1)
enc_alt = (msg >> 36) & 0x0FFF
time = bool((msg >> 35) & 1)
cpr_format = (msg >> 34) & 1
encoded_lat = (msg >> 17) & 0x1FFFF
encoded_lon = msg & 0x1FFFF
if encoded_lat == 0 or encoded_lon == 0:
raise cpr.CPRNoPositionError
altitude = decode_alt(enc_alt, False)
decoder.set_location(receiver_pos)
[decoded_lat, decoded_lon] = decoder.decode(icao24, encoded_lat, encoded_lon, cpr_format, ts, False)
return [surveillance_status, nic_suppl, altitude, time, cpr_format, decoded_lat, decoded_lon]
def decodePosition(icao24, msg, receiver_pos, decoder, ts):
frmt = (msg >> 51) & 0x1F
# position
[surveillance_status, nic_suppl, altitude, time, cpr_format, decoded_lat, decoded_lon] = \
parseAirbornePosition(icao24, msg, receiver_pos, decoder, ts)
alttype = "Baro" if frmt >= 9 and frmt <= 18 else "GNSS"
return [surveillance_status, nic_suppl, alttype, altitude, time, cpr_format, decoded_lat, decoded_lon]
def decodeVelocity(msg):
    alt_geo_diff = (msg & 0x7F) - 1
above_below = bool((msg >> 7) & 1)
if above_below:
alt_geo_diff = 0 - alt_geo_diff;
    vert_spd = float(((msg >> 10) & 0x1FF) - 1)
ud = bool((msg >> 19) & 1)
if ud:
vert_spd = 0 - vert_spd
baro = bool((msg >> 20) & 1)
    ns_vel = float(((msg >> 21) & 0x3FF) - 1)
ns = bool((msg >> 31) & 1)
    ew_vel = float(((msg >> 32) & 0x3FF) - 1)
ew = bool((msg >> 42) & 1)
subtype = (msg >> 48) & 0x07
if subtype == 0x02:
ns_vel *= 4
ew_vel *= 4
vert_spd *= 64
alt_geo_diff *= 25
velocity = math.hypot(ns_vel, ew_vel)
if ew:
ew_vel = 0 - ew_vel
if ns_vel == 0:
heading = 0
else:
heading = math.atan(float(ew_vel) / float(ns_vel)) * (180.0 / math.pi)
if ns:
heading = 180 - heading
if heading < 0:
heading += 360
nac = (msg >> 43) & 7
adsb_conflict_detection = bool((msg >> 46) & 1)
intent_change = bool((msg >> 47) & 1)
supersonic = (subtype == 2)
return [supersonic, intent_change, adsb_conflict_detection, nac, velocity, heading, baro, vert_spd, alt_geo_diff]
def decodeStatus(msg):
subtype = (msg >> 48) & 0x7
emergency = (msg >> 45) & 0x7
reserved = msg & 0x1FFFFFFFFFFF
return [subtype, emergency, reserved]
def decodeOpStatus(msg):
version = (msg >> 13) & 0x7
nic_suppl = (msg >> 12) & 1
nac_pos = (msg >> 8) & 0xF
sil = (msg >> 4) & 0x3
magnetic_north = (msg >> 2) & 1
reserved = msg & 3
# airborne status message
capability = (msg >> 32) & 0xFFFF
opmode = (msg >> 16) & 0xFFFF
gva = (msg >> 6) & 0x3
nic_baro = (msg >> 3) & 1
return [capability, opmode, version, nic_suppl, nac_pos, gva, sil, nic_baro, magnetic_north, reserved]
| koppa/ADSB-Sniffer | evaluation/adsb/decoder.py | Python | gpl-3.0 | 3,826 |
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from django.core.urlresolvers import reverse
from .. import factories as f
from taiga.permissions.permissions import MEMBERS_PERMISSIONS
from tests.utils import disconnect_signals, reconnect_signals
pytestmark = pytest.mark.django_db(transaction=True)
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def searches_initial_data():
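    # Plain namespace object used to hand the created fixtures to the tests.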
m = type("InitialData", (object,), {})()
m.project1 = f.ProjectFactory.create()
m.project2 = f.ProjectFactory.create()
m.member1 = f.MembershipFactory(project=m.project1,
role__project=m.project1,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
m.member2 = f.MembershipFactory(project=m.project1,
role__project=m.project1,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.RoleFactory(project=m.project2)
m.points1 = f.PointsFactory(project=m.project1, value=None)
m.points2 = f.PointsFactory(project=m.project2, value=None)
m.role_points1 = f.RolePointsFactory.create(role=m.project1.roles.all()[0],
points=m.points1,
user_story__project=m.project1)
m.role_points2 = f.RolePointsFactory.create(role=m.project1.roles.all()[0],
points=m.points1,
user_story__project=m.project1,
user_story__description="Back to the future")
m.role_points3 = f.RolePointsFactory.create(role=m.project2.roles.all()[0],
points=m.points2,
user_story__project=m.project2)
m.us1 = m.role_points1.user_story
m.us2 = m.role_points2.user_story
m.us3 = m.role_points3.user_story
m.tsk1 = f.TaskFactory.create(project=m.project2)
m.tsk2 = f.TaskFactory.create(project=m.project1)
m.tsk3 = f.TaskFactory.create(project=m.project1, subject="Back to the future")
m.iss1 = f.IssueFactory.create(project=m.project1, subject="Backend and Frontend")
m.iss2 = f.IssueFactory.create(project=m.project2)
m.iss3 = f.IssueFactory.create(project=m.project1)
m.wiki1 = f.WikiPageFactory.create(project=m.project1)
m.wiki2 = f.WikiPageFactory.create(project=m.project1, content="Frontend, future")
m.wiki3 = f.WikiPageFactory.create(project=m.project2)
return m
def test_search_all_objects_in_my_project(client, searches_initial_data):
data = searches_initial_data
client.login(data.member1.user)
response = client.get(reverse("search-list"), {"project": data.project1.id})
assert response.status_code == 200
assert response.data["count"] == 8
assert len(response.data["userstories"]) == 2
assert len(response.data["tasks"]) == 2
assert len(response.data["issues"]) == 2
assert len(response.data["wikipages"]) == 2
def test_search_all_objects_in_project_is_not_mine(client, searches_initial_data):
data = searches_initial_data
client.login(data.member1.user)
response = client.get(reverse("search-list"), {"project": data.project2.id})
assert response.status_code == 200
assert response.data["count"] == 0
def test_search_text_query_in_my_project(client, searches_initial_data):
data = searches_initial_data
client.login(data.member1.user)
response = client.get(reverse("search-list"), {"project": data.project1.id, "text": "future"})
assert response.status_code == 200
assert response.data["count"] == 3
assert len(response.data["userstories"]) == 1
assert len(response.data["tasks"]) == 1
assert len(response.data["issues"]) == 0
assert len(response.data["wikipages"]) == 1
response = client.get(reverse("search-list"), {"project": data.project1.id, "text": "back"})
assert response.status_code == 200
assert response.data["count"] == 3
assert len(response.data["userstories"]) == 1
assert len(response.data["tasks"]) == 1
    # 'back' also matches the issue 'Backend and Frontend' as a substring
assert len(response.data["issues"]) == 1
assert len(response.data["wikipages"]) == 0
def test_search_text_query_with_an_invalid_project_id(client, searches_initial_data):
data = searches_initial_data
client.login(data.member1.user)
response = client.get(reverse("search-list"), {"project": "new", "text": "future"})
assert response.status_code == 404
| Rademade/taiga-back | tests/integration/test_searches.py | Python | agpl-3.0 | 5,639 |
"""
You are given an array of desired filenames in the order of their creation. Since two files cannot have equal names, the one which comes later will have an addition to its name in a form of (k), where k is the smallest positive integer such that the obtained name is not used yet.
Return an array of names that will be given to the files.
Example
For names = ["doc", "doc", "image", "doc(1)", "doc"], the output should be
fileNaming(names) = ["doc", "doc(1)", "image", "doc(1)(1)", "doc(2)"].
Input/Output
[execution time limit] 4 seconds (py)
[input] array.string names
Guaranteed constraints:
5 <= names.length <= 15,
1 <= names[i].length <= 15.
[output] array.string
"""
def fileNaming(names):
new_names = []
for name in names:
if name not in new_names:
new_names.append(name)
else:
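            # With at most 15 names in total, a base name can need suffixes
            # (1) through (14) at most, so range(1, 16) is sufficient.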
for x in range(1, 16):
if "{}({})".format(name, x) not in new_names:
new_names.append("{}({})".format(name, x))
break
return new_names
if __name__ == '__main__':
print fileNaming(["doc", "doc", "image", "doc(1)", "doc"]) | coingraham/codefights | python/fileNaming/fileNaming.py | Python | mit | 1,136 |
from settings import RED, YELLOW, GREEN, WHITE
from random import randint
class Scheme(object):
visual_generic = "{}|{}|{}\n{}|{}|{}\n{}|{}|{}"
def __init__(self, scheme, mine_class):
if not (0 < scheme < 512):
raise ValueError("Scheme must be between 1 and 511")
self.scheme = scheme
self.mine_class = mine_class
@property
def bin_scheme(self):
return '{0:09b}'.format(self.scheme)
def get_relative_pos(self):
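        # Decode the 9-bit scheme into a list of (x, y) offsets inside a
        # 3x3 block; e.g. scheme=57 ('000111001') yields
        # [(1, 0), (1, 1), (1, 2), (2, 2)].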
array = [self.bin_scheme[i:i+3] for i in range(0, len(self.bin_scheme), 3)]
pos = []
for y in range(3):
for x in range(3):
if array[x][y] == '1':
pos.append((x, y))
return sorted(pos)
def get_absolute_pos(self, pos_x, pos_y):
pos = []
for x, y in self.get_relative_pos():
pos.append((pos_x + x, pos_y + y))
return pos
def visualise(self):
return self.visual_generic.format(
*['x' if elem == '1' else '_' for elem in self.bin_scheme]
)
def place(self, grid):
grid_len = len(grid)
position = (randint(0, grid_len-1), randint(0, grid_len-1))
absolute_pos = self.get_absolute_pos(*position)
for x, y in absolute_pos:
# check if no mine is here
try:
if grid[x][y] != 0 or (x == grid_len-1 and y == grid_len-1):
return False
except IndexError:
return False
# check if the same mine is near (3x3 square) - Moore neighbourhood
for i in range(-1, 2):
for j in range(-1, 2):
try:
if grid[x+i][y+j] == 0:
continue
if grid[x+i][y+j].color == self.mine_class.color:
return False
except IndexError:
pass
for x, y in absolute_pos:
grid[x][y] = self.mine_class()
return True
class BaseField(object):
color = WHITE
damage = 0
def __init__(self):
self.reset_radiation()
def reset_radiation(self):
self.radiation = {
RED: [0],
YELLOW: [0],
GREEN: [0],
}
self.max_radiation = {
RED: 0,
YELLOW: 0,
GREEN: 0,
}
def compute_max(self):
for key in self.max_radiation:
self.max_radiation[key] = max(self.radiation[key])
def __repr__(self):
return '{}_{}:{}'.format(
self.__class__.__name__, self.pos_x, self.pos_y)
class RedMine(BaseField):
color = RED
damage = 50
class YellowMine(BaseField):
color = YELLOW
damage = 20
class GreenMine(BaseField):
color = GREEN
damage = 10
if __name__ == '__main__':
example_scheme = Scheme(57, YellowMine)
print example_scheme.get_absolute_pos(12, 12)
print example_scheme.visualise()
    # Demo of place(): drop the scheme's mines onto an empty grid
    # (a 20x20 grid of zeros is assumed here).
    grid = [[0] * 20 for _ in range(20)]
    print example_scheme.place(grid)
| Nozdi/miner | board/mine.py | Python | mit | 3,065 |
#!/usr/bin/env python3
"""regex-tokenize input text"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import logging
import os
import sys
from fnl.nlp.strtok import SpaceTokenizer, WordTokenizer, AlnumTokenizer
__author__ = 'Florian Leitner'
__version__ = '0.0.1'
def map(text_iterator, tokenizer):
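    # Emits one token per line as tab-separated fields; note that this
    # shadows the builtin map(), which is not used elsewhere in the script.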
for text in text_iterator:
for token in tokenizer.tokenize(text):
print(*token, sep='\t')
epilog = 'system (default) encoding: {}'.format(sys.getdefaultencoding())
parser = ArgumentParser(
usage='%(prog)s [options] [FILE ...]',
description=__doc__, epilog=epilog,
prog=os.path.basename(sys.argv[0])
)
parser.set_defaults(loglevel=logging.WARNING)
parser.set_defaults(tokenizer=AlnumTokenizer)
parser.add_argument('files', metavar='FILE', nargs='*', type=open,
help='input file(s); if absent, read from <STDIN>')
parser.add_argument('--space', action='store_const', const=SpaceTokenizer,
dest='tokenizer', help='use space tokenizer [alnum]')
parser.add_argument('--word', action='store_const', const=WordTokenizer,
                    dest='tokenizer', help='use word tokenizer [alnum]')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--error', action='store_const', const=logging.ERROR,
dest='loglevel', help='error log level only [warn]')
parser.add_argument('--info', action='store_const', const=logging.INFO,
dest='loglevel', help='info log level [warn]')
parser.add_argument('--debug', action='store_const', const=logging.DEBUG,
dest='loglevel', help='debug log level [warn]')
parser.add_argument('--logfile', metavar='FILE',
help='log to file instead of <STDERR>')
args = parser.parse_args()
files = args.files if args.files else [sys.stdin]
logging.basicConfig(
filename=args.logfile, level=args.loglevel,
format='%(asctime)s %(name)s %(levelname)s: %(message)s'
)
for input_stream in files:
try:
map(input_stream, args.tokenizer())
except:
logging.exception("unexpected program error")
parser.error("unexpected program error")
| fnl/libfnl | scripts/fnltok.py | Python | agpl-3.0 | 2,812 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers import Layer
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.merge import multiply
from keras.layers.merge import concatenate
from keras.layers.core import Permute
from keras.layers.core import RepeatVector
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Reshape
from keras.layers.core import Flatten
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input
from keras.models import Model
from keras import metrics
from config_aa import *
import tb_callback
import lrs_callback
import argparse
import math
import os
import cv2
from sys import stdout
def encoder_model():
model = Sequential()
# 10x128x128
model.add(Conv3D(filters=128,
strides=(1, 4, 4),
kernel_size=(3, 11, 11),
padding='same',
input_shape=(int(VIDEO_LENGTH/2), 128, 128, 3)))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x32x32
model.add(Conv3D(filters=64,
strides=(1, 2, 2),
kernel_size=(3, 5, 5),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x16x16
model.add(Conv3D(filters=32,
strides=(1, 1, 1),
kernel_size=(3, 5, 5),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
return model
def decoder_model():
inputs = Input(shape=(10, 16, 16, 32))
# 10x64x64
conv_1 = Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 1, 1))(inputs)
x = TimeDistributed(BatchNormalization())(conv_1)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_1 = TimeDistributed(Dropout(0.5))(x)
# 10x32x32
conv_2 = Conv3DTranspose(filters=128,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2))(out_1)
x = TimeDistributed(BatchNormalization())(conv_2)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_2 = TimeDistributed(Dropout(0.5))(x)
# 10x64x64
conv_3 = Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2))(out_2)
x = TimeDistributed(BatchNormalization())(conv_3)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_3 = TimeDistributed(Dropout(0.5))(x)
# Learn alpha_1
conv3D_1 = Conv3D(filters=1,
strides=(1, 1, 1),
kernel_size=(3, 3, 3),
dilation_rate=(2, 2, 2),
padding='same')(out_3)
x = TimeDistributed(BatchNormalization())(conv3D_1)
x = TimeDistributed(Dropout(0.5))(x)
# conv3D_2 = Conv3D(filters=1,
# strides=(1, 1, 1),
# kernel_size=(3, 3, 3),
# dilation_rate=(3, 3, 3),
# padding='same')(x)
# x = TimeDistributed(BatchNormalization())(conv3D_2)
# x = TimeDistributed(Dropout(0.5))(x)
flat_1 = TimeDistributed(Flatten())(x)
dense_1 = TimeDistributed(Dense(units=64 * 64, activation='softmax'))(flat_1)
x = TimeDistributed(Dropout(0.5))(dense_1)
a = Reshape(target_shape=(10, 64, 64, 1))(x)
# Custom loss layer
class CustomLossLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomLossLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
super(CustomLossLayer, self).build(input_shape) # Be sure to call this somewhere!
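        # attn_loss() pushes the per-pixel attention weights to sum to ~1
        # across the 10 predicted frames, scaled by ATTN_COEFF.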
def attn_loss(self, a):
attn_loss = K.sum(K.flatten(K.square(1 - K.sum(a, axis=1))), axis=-1)
return ATTN_COEFF * K.mean(attn_loss)
def call(self, inputs):
x = inputs
print (inputs.shape)
loss = self.attn_loss(x)
self.add_loss(loss, inputs=inputs)
# We do use this output.
return x
def compute_output_shape(self, input_shape):
return (input_shape[0], 10, 64, 64, 1)
x = CustomLossLayer()(a)
x = Flatten()(x)
x = RepeatVector(n=64)(x)
x = Permute((2, 1))(x)
x = Reshape(target_shape=(10, 64, 64, 64))(x)
attn_1 = multiply([out_3, x])
# 10x128x128
conv_4 = Conv3DTranspose(filters=3,
kernel_size=(3, 11, 11),
strides=(1, 2, 2),
padding='same')(attn_1)
x = TimeDistributed(BatchNormalization())(conv_4)
x = TimeDistributed(Activation('tanh'))(x)
predictions = TimeDistributed(Dropout(0.5))(x)
# x = TimeDistributed(Dropout(0.5))(x)
model = Model(inputs=inputs, outputs=predictions)
return model
def set_trainability(model, trainable):
model.trainable = trainable
for layer in model.layers:
layer.trainable = trainable
def autoencoder_model(encoder, decoder):
model = Sequential()
model.add(encoder)
model.add(decoder)
return model
def combine_images(X, y, generated_images):
# Unroll all generated video frames
n_frames = generated_images.shape[0] * generated_images.shape[1]
frames = np.zeros((n_frames,) + generated_images.shape[2:], dtype=generated_images.dtype)
frame_index = 0
for i in range(generated_images.shape[0]):
for j in range(generated_images.shape[1]):
frames[frame_index] = generated_images[i, j]
frame_index += 1
num = frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = frames.shape[1:]
image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=generated_images.dtype)
for index, img in enumerate(frames):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
n_frames = X.shape[0] * X.shape[1]
orig_frames = np.zeros((n_frames,) + X.shape[2:], dtype=X.dtype)
# Original frames
frame_index = 0
for i in range(X.shape[0]):
for j in range(X.shape[1]):
orig_frames[frame_index] = X[i, j]
frame_index += 1
num = orig_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = orig_frames.shape[1:]
orig_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=X.dtype)
for index, img in enumerate(orig_frames):
i = int(index / width)
j = index % width
orig_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
# Ground truth
truth_frames = np.zeros((n_frames,) + y.shape[2:], dtype=y.dtype)
frame_index = 0
for i in range(y.shape[0]):
for j in range(y.shape[1]):
truth_frames[frame_index] = y[i, j]
frame_index += 1
num = truth_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = truth_frames.shape[1:]
truth_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=y.dtype)
for index, img in enumerate(truth_frames):
i = int(index / width)
j = index % width
truth_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
return orig_image, truth_image, image
def load_weights(weights_file, model):
model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
if PRINT_MODEL_SUMMARY:
print (encoder.summary())
print (decoder.summary())
print (autoencoder.summary())
# exit(0)
# Save model to file
if SAVE_MODEL:
print ("Saving models to file...")
model_json = encoder.to_json()
with open(os.path.join(MODEL_DIR, "encoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = decoder.to_json()
with open(os.path.join(MODEL_DIR, "decoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = autoencoder.to_json()
with open(os.path.join(MODEL_DIR, "autoencoder.json"), "w") as json_file:
json_file.write(model_json)
if PLOT_MODEL:
plot_model(encoder, to_file=os.path.join(MODEL_DIR, 'encoder.png'), show_shapes=True)
plot_model(decoder, to_file=os.path.join(MODEL_DIR, 'decoder.png'), show_shapes=True)
plot_model(autoencoder, to_file=os.path.join(MODEL_DIR, 'autoencoder.png'), show_shapes=True)
if ENC_WEIGHTS != "None":
print ("Pre-loading encoder with weights...")
load_weights(ENC_WEIGHTS, encoder)
if DEC_WEIGHTS != "None":
print ("Pre-loading decoder with weights...")
load_weights(DEC_WEIGHTS, decoder)
def load_X(videos_list, index, data_dir):
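    # Load one batch of VIDEO_LENGTH-frame clips from disk, scaling pixel
    # values from [0, 255] into [-1, 1].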
X = np.zeros((BATCH_SIZE, VIDEO_LENGTH,) + IMG_SIZE)
for i in range(BATCH_SIZE):
for j in range(VIDEO_LENGTH):
filename = "frame_" + str(videos_list[(index*BATCH_SIZE + i), j]) + ".png"
im_file = os.path.join(data_dir, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
except AttributeError as e:
print (im_file)
print (e)
return X
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
print ("Loading data...")
frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_128.hkl'))
# Build video progressions
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + 1
end_frame_index = end_frame_index + 1
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
n_videos = videos_list.shape[0]
if SHUFFLE:
# Shuffle images to aid generalization
videos_list = np.random.permutation(videos_list)
# Build the Spatio-temporal Autoencoder
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
NB_ITERATIONS = int(n_videos/BATCH_SIZE)
# Setup TensorBoard Callback
TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
LRS.set_model(autoencoder)
print ("Beginning Training...")
# Begin Training
for epoch in range(NB_EPOCHS):
print("\n\nEpoch ", epoch)
loss = []
# Set learning rate every epoch
LRS.on_epoch_begin(epoch=epoch)
lr = K.get_value(autoencoder.optimizer.lr)
print ("Learning rate: " + str(lr))
for index in range(NB_ITERATIONS):
# Train Autoencoder
X = load_X(videos_list, index, DATA_DIR)
X_train = X[:, 0 : int(VIDEO_LENGTH/2)]
y_train = X[:, int(VIDEO_LENGTH/2) :]
loss.append(autoencoder.train_on_batch(X_train, y_train))
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + " " +
"loss: " + str(loss[len(loss)-1]) +
"\t [" + "{0}>".format("="*(arrow)))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
predicted_images = autoencoder.predict(X_train, verbose=0)
orig_image, truth_image, pred_image = combine_images(X_train, y_train, predicted_images)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
if epoch == 0 :
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_pred.png"), pred_image)
# then after each epoch/iteration
avg_loss = sum(loss)/len(loss)
logs = {'loss': avg_loss}
TC.on_epoch_end(epoch, logs)
# Log the losses
with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
log_file.write("{\"epoch\":%d, \"d_loss\":%f};\n" % (epoch, avg_loss))
print("\nAvg loss: " + str(avg_loss))
# Save model weights per epoch to file
encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
# End TensorBoard Callback
TC.on_train_end('_')
def test(ENC_WEIGHTS, DEC_WEIGHTS):
# Create models
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
for i in range(len(decoder.layers)):
print (decoder.layers[i], str(i))
# exit(0)
def build_intermediate_model(encoder, decoder):
# convlstm-13, conv3d-25
intermediate_decoder_1 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[19].output)
# intermediate_decoder_2 = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[12].output)
imodel_1 = Sequential()
imodel_1.add(encoder)
imodel_1.add(intermediate_decoder_1)
# imodel_2 = Sequential()
# imodel_2.add(encoder)
# imodel_2.add(intermediate_decoder_2)
return imodel_1
imodel_1 = build_intermediate_model(encoder, decoder)
imodel_1.compile(loss='mean_squared_error', optimizer=OPTIM)
# imodel_2.compile(loss='mean_squared_error', optimizer=OPTIM)
# imodel = build_intermediate_model(encoder, decoder)
# Build video progressions
frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_128.hkl'))
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + VIDEO_LENGTH
end_frame_index = end_frame_index + VIDEO_LENGTH
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
n_videos = videos_list.shape[0]
# Test model by making predictions
loss = []
NB_ITERATIONS = int(n_videos / BATCH_SIZE)
for index in range(NB_ITERATIONS):
# Test Autoencoder
X = load_X(videos_list, index, TEST_DATA_DIR)
X_test = X[:, 0: int(VIDEO_LENGTH / 2)]
y_test = X[:, int(VIDEO_LENGTH / 2):]
loss.append(autoencoder.test_on_batch(X_test, y_test))
y_pred = autoencoder.predict_on_batch(X_test)
a_pred_1 = imodel_1.predict_on_batch(X_test)
# a_pred_2 = imodel_2.predict_on_batch(X_test)
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
"loss: " + str(loss[len(loss) - 1]) +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
orig_image, truth_image, pred_image = combine_images(X_test, y_test, y_pred)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_pred.png"), pred_image)
#------------------------------------------
a_pred_1 = np.reshape(a_pred_1, newshape=(10, 10, 64, 64, 1))
np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) +'.npy'), a_pred_1)
orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_1)
# pred_image = (pred_image*100) * 127.5 + 127.5
# y_pred = y_pred * 127.5 + 127.5
# np.save(os.path.join(TEST_RESULTS_DIR, 'attention_weights_' + str(index) + '.npy'), y_pred)
# cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_1.png"), pred_image)
# a_pred_2 = np.reshape(a_pred_2, newshape=(10, 10, 16, 16, 1))
# with open('attention_weights.txt', mode='w') as file:
# file.write(str(a_pred_2[0, 4]))
# orig_image, truth_image, pred_image = combine_images(X_test, y_test, a_pred_2)
# pred_image = (pred_image*100) * 127.5 + 127.5
# cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_attn_2.png"), pred_image)
avg_loss = sum(loss) / len(loss)
print("\nAvg loss: " + str(avg_loss))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str)
parser.add_argument("--enc_weights", type=str, default="None")
parser.add_argument("--dec_weights", type=str, default="None")
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
parser.add_argument("--nice", dest="nice", action="store_true")
parser.set_defaults(nice=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
train(BATCH_SIZE=args.batch_size,
ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
if args.mode == "test":
test(ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights) | pratikgujjar/DeepIntent | code/autoencoder_model/scripts/attention_autoencoder.py | Python | mit | 20,029 |
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt
Freq=np.array([20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,260,270,280,290,300,310,320])
Db=np.array([70,76.7,87.1,95.4,94.2,93.2,93.2,93.9,95.4,97.7,101.3,106.3,110.7,106,104.1,103.3,103.1,103.9,105.5,108.0,111.2,113.3,112.3,110.4,109.1,108.4,108.1,108.3,109.1,109.9,112.2])
plt.xlabel('Frequency')
plt.ylabel('Decibel')
plt.title('Decibel vs Freq at 0.01 volts CLOSED')
#for i in range(len(Freq)):
# plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
plt.axis([0, 430, 50, 130])
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
| P1R/cinves | TrabajoFinal/tubo180cm/2-DbvsFreq/DbvsFreq-Ampde0.01v-CERRADOeENEXTREMO.py | Python | apache-2.0 | 676 |
from os.path import join
from bundlewrap.utils.testing import make_repo, run
def test_empty(tmpdir):
make_repo(tmpdir)
stdout, stderr, rcode = run("bw hash", path=str(tmpdir))
assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n"
assert stderr == b""
def test_nondeterministic(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'bundles': ["bundle1"],
},
},
bundles={
"bundle1": {
'files': {
"/test": {
'content_type': 'mako',
'content': "<% import random %>${random.randint(1, 9999)}",
},
},
},
},
)
hashes = set()
for i in range(3):
stdout, stderr, rcode = run("bw hash", path=str(tmpdir))
hashes.add(stdout.strip())
assert len(hashes) > 1
def test_deterministic(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'bundles': ["bundle1"],
},
},
bundles={
"bundle1": {
'files': {
"/test": {
'content': "${node.name}",
'group': None, # BSD has a different default and we don't want to
# deal with that here
},
},
},
},
)
hashes = set()
for i in range(3):
stdout, stderr, rcode = run("bw hash", path=str(tmpdir))
hashes.add(stdout.strip())
assert len(hashes) == 1
assert hashes.pop() == b"2203e7acc35608bbff471c023b7b7498e5b385d9"
def test_dict(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'bundles': ["bundle1"],
},
},
bundles={
"bundle1": {
'files': {
"/test": {
'content': "yes please",
'group': None, # BSD has a different default and we don't want to
# deal with that here
},
},
},
},
)
stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir))
assert rcode == 0
assert stdout == b"93e7a2c6e8cdc71fb4df5426bc0d0bb978d84381 node1\n"
stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"59d1a7c79640ccdfd3700ab141698a9389fcd0b7 file:/test\n"
stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir))
assert rcode == 0
assert stdout == (
b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n"
b"mode\t0644\n"
b"owner\troot\n"
b"type\tfile\n"
)
def test_metadata_empty(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'metadata': {},
},
},
)
stdout, stderr, rcode = run("bw hash -m node1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n"
def test_metadata_fault(tmpdir):
make_repo(tmpdir)
with open(join(str(tmpdir), "nodes.py"), 'w') as f:
f.write("""
nodes = {
'node1': {
'metadata': {'foo': vault.password_for("testing")},
},
'node2': {
'metadata': {'foo': vault.password_for("testing").value},
},
'node3': {
'metadata': {'foo': "faCTT76kagtDuZE5wnoiD1CxhGKmbgiX"},
},
'node4': {
'metadata': {'foo': "something else entirely"},
},
}
""")
print(run("bw debug -c 'print(repo.vault.password_for(\"testing\"))'", path=str(tmpdir)))
stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir))
assert stdout1 == b"d0c998fd17a68322a03345954bb0a75301d3a127\n"
assert stderr == b""
assert rcode == 0
stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir))
assert stdout2 == stdout1
assert stderr == b""
assert rcode == 0
stdout3, stderr, rcode = run("bw hash -m node3", path=str(tmpdir))
assert stdout3 == stdout1
assert stderr == b""
assert rcode == 0
stdout4, stderr, rcode = run("bw hash -m node4", path=str(tmpdir))
assert stdout4 != stdout1
assert stderr == b""
assert rcode == 0
def test_metadata_nested_sort(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'metadata': {
'nested': {
'one': True,
'two': False,
'three': 3,
'four': "four",
'five': None,
},
},
},
"node2": {
'metadata': {
'nested': {
'five': None,
'four': "four",
'one': True,
'three': 3,
'two': False,
},
},
},
},
)
stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir))
assert rcode == 0
assert stdout1 == b"bc403a093ca3399cd3efa7a64ec420e0afef5e70\n"
stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir))
assert rcode == 0
assert stdout1 == stdout2
def test_metadata_repo(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'metadata': {
'foo': 47,
},
},
},
)
stdout, stderr, rcode = run("bw hash -m", path=str(tmpdir))
assert rcode == 0
assert stdout == b"c0cc160ab1b6e71155cd4f65139bc7f66304d7f3\n"
def test_metadata_repo_dict(tmpdir):
make_repo(
tmpdir,
nodes={
"node1": {
'metadata': {
'foo': 47,
},
},
},
)
stdout, stderr, rcode = run("bw hash -md", path=str(tmpdir))
assert rcode == 0
assert stdout == b"node1\t013b3a8199695eb45c603ea4e0a910148d80e7ed\n"
def test_groups_repo(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {},
"group2": {},
},
)
stdout, stderr, rcode = run("bw hash -g", path=str(tmpdir))
assert rcode == 0
assert stdout == b"479c737e191339e5fae20ac8a8903a75f6b91f4d\n"
def test_groups_repo_dict(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {},
"group2": {},
},
)
stdout, stderr, rcode = run("bw hash -dg", path=str(tmpdir))
assert rcode == 0
assert stdout == b"group1\ngroup2\n"
def test_groups(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {'members': ["node1", "node2"]},
"group2": {'members': ["node3"]},
},
nodes={
"node1": {},
"node2": {},
"node3": {},
},
)
stdout, stderr, rcode = run("bw hash -g group1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"59f5a812acd22592b046b20e9afedc1cfcd37c77\n"
def test_groups_dict(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {'members': ["node1", "node2"]},
"group2": {'members': ["node3"]},
},
nodes={
"node1": {},
"node2": {},
"node3": {},
},
)
stdout, stderr, rcode = run("bw hash -dg group1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"node1\nnode2\n"
def test_groups_node(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {'members': ["node1", "node2"]},
"group2": {'members': ["node3"]},
},
nodes={
"node1": {},
"node2": {},
"node3": {},
},
)
stdout, stderr, rcode = run("bw hash -g node1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"6f4615dc71426549e22df7961bd2b88ba95ad1fc\n"
def test_groups_node_dict(tmpdir):
make_repo(
tmpdir,
groups={
"group1": {'members': ["node1", "node2"]},
"group2": {'members': ["node3"]},
},
nodes={
"node1": {},
"node2": {},
"node3": {},
},
)
stdout, stderr, rcode = run("bw hash -dg node1", path=str(tmpdir))
assert rcode == 0
assert stdout == b"group1\n"
| timbuchwaldt/bundlewrap | tests/integration/bw_hash.py | Python | gpl-3.0 | 8,581 |
# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapy
from function import get_title
key_word = '计算机视觉'
start_year, end_year = 2015, 2017
all_name = get_title()
# all_name = []
class Data_Spider(scrapy.Spider):
name = 'Crawler'
start_urls = ['http://s.wanfangdata.com.cn/Paper.aspx?q=关键字%3a{}+日期%3a{}-{}&f=top&p=1'.format(key_word, start_year, end_year)]
def parse(self, response):
messages = response.xpath("//div[@class='record-item-list']/div/div[@class='left-record']/div[@class='record-title']/a[@class='title']")
# paper list
for message in messages:
paper_url = message.xpath(".//@href").extract()[0]
paper_name = self.get_name(message)
if paper_name in all_name:
continue
yield scrapy.Request(paper_url, callback=self.parse_pages)
now_number = int(response.xpath("//p[@class='pager']/strong/text()").extract()[0])
last_number = int(response.xpath("//p[@class='pager']/span/text()").extract()[0].split('/')[1])
if now_number == last_number:
return
next_url = 'http://s.wanfangdata.com.cn/Paper.aspx?q=关键字%3a{}+日期%3a{}-{}&f=top&p={}'.format(key_word, start_year, end_year, now_number + 1)
yield scrapy.Request(next_url, callback=self.parse)
def parse_pages(self, response):
paper_name = response.xpath("//h1/text()").extract()[0].strip()
abstract = response.xpath("//div[@class='row clear zh']/div[@class='text']/text()").extract()[0].strip()
all_messages = {'title': paper_name, 'abstract': abstract}
return all_messages
def get_name(self, message):
texts = ''
text_list = message.xpath(".//text()").extract()
for text in text_list:
texts += text.strip()
return texts
| hbtech-ai/ARPS | classification_data/classification_data/spiders/data_crawler.py | Python | mit | 1,714 |
'''
http://inside.wot.kongzhong.com/inside/wotinside/signact/signinfo?jsonpcallback=jQuery&useraccount=&login=<base64(<login>)>=&zoneid=1500100
http://inside.wot.kongzhong.com/inside/wotinside/signact/sign?jsonpcallback=jQuery&useraccount=&login=<base64(<login>)>&zoneid=1500100
'''
# coding: utf-8
import base64
import time
import requests
def jsonp(url, **params):
params['jsonpcallback'] = 'jQuery'
res = requests.post(url, params)
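    # The endpoint replies with JSONP such as "jQuery({...})"; the local
    # jQuery() below returns its argument, so eval() unwraps the payload.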
def jQuery(obj): # NOQA
return obj
return eval(res.text)
def dbg(fmt, *args):
msg = fmt % args
if isinstance(msg, bytes):
msg = msg.decode('ascii')
print(msg)
def call(api, login, zoneid=1500100):
_login = base64.b64encode(login)
url = b'http://inside.wot.kongzhong.com/inside/wotinside/signact/%b'
url = url % api
data = jsonp(url, login=_login, zoneid=zoneid, useraccount='')
dbg(b"%b('%b', %d) => %r", api, login, zoneid, data)
return data
def sign(login, zoneid=1500100):
return call(b'sign', login, zoneid)
def signinfo(login, zoneid=1500100):
return call(b'signinfo', login, zoneid)
def xxx_sign(login, times, zoneid=1500100):
for _ in range(times):
sign(login, zoneid)
login += b' '
time.sleep(5)
if __name__ == '__main__':
# signinfo(b'xxx')
sign(b'xxx')
# xxx_sign(b'xxx', 10)
| zhaipro/misc | hack_wot.py | Python | mit | 1,354 |
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webob import Response
from webob.exc import HTTPNotFound, HTTPPreconditionFailed, \
HTTPInternalServerError, HTTPConflict, HTTPBadRequest
from lunr.cinder.cinderclient import CinderError
from lunr.common import logger
from lunr.common.exc import NodeError
from lunr.storage.controller.base import BaseController, lock, inspect
from lunr.common.lock import ResourceFile
from lunr.storage.helper.volume import NotFound, AlreadyExists, InvalidImage
from lunr.storage.helper.utils import ServiceUnavailable, ResourceBusy
class VolumeController(BaseController):
def index(self, req):
volumes = self.helper.volumes.list()
return Response(volumes)
def show(self, req):
try:
volume = self.helper.volumes.get(self.id)
except NotFound:
raise HTTPNotFound("No volume named '%s'" % self.id)
return Response(volume)
def _validate_iops(self, req):
try:
read_iops = int(req.params.get('read_iops', 0))
except ValueError:
raise HTTPPreconditionFailed("'read_iops' parameter must be an "
"integer")
if read_iops < 0:
raise HTTPPreconditionFailed("'read_iops' parameter can not be "
"negative")
try:
write_iops = int(req.params.get('write_iops', 0))
except ValueError:
raise HTTPPreconditionFailed("'write_iops' parameter must be an "
"integer")
if write_iops < 0:
raise HTTPPreconditionFailed("'write_iops' parameter can not be "
"negative")
return {
'read_iops': read_iops,
'write_iops': write_iops
}
def _validate_size(self, req):
try:
size = int(req.params['size'])
except KeyError:
raise HTTPBadRequest("Must specify size")
except ValueError:
raise HTTPPreconditionFailed("'size' parameter must be an integer")
if size < 0:
raise HTTPPreconditionFailed("'size' parameter can not be "
"negative")
return size
def _validate_backup_params(self, req):
backup_id = req.params['backup_id']
if len(backup_id) > 60:
raise HTTPPreconditionFailed(
"length of 'backup_id' parameter cannot exceed 60")
try:
backup_source_volume_id = req.params['backup_source_volume_id']
if len(backup_source_volume_id) > 60:
raise HTTPPreconditionFailed(
"length of 'backup_source_volume_id' parameter cannot"
" exceed 60")
except KeyError:
raise HTTPBadRequest("Must specify backup_source_volume_id")
return {
'backup_source_volume_id': backup_source_volume_id,
'backup_id': backup_id,
}
def _create_from_image_cb(self, req, iops):
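        # Returns a callback that applies the requested IOPS limits once the
        # imaging finishes and reports IMAGING_SCRUB (or IMAGING_ERROR on a
        # missing volume) back to the API server.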
def callback():
lunr_state = 'IMAGING_SCRUB'
try:
volume = self.helper.volumes.get(self.id)
self.helper.cgroups.set_read_iops(volume, iops['read_iops'])
self.helper.cgroups.set_write_iops(volume, iops['write_iops'])
except NotFound:
lunr_state = 'IMAGING_ERROR'
self.helper.make_api_request('volumes', self.id, data={
'status': lunr_state})
return callback
def _post_scrub_cb(self):
def callback():
lunr_state = 'ACTIVE'
try:
volume = self.helper.volumes.get(self.id)
except NotFound:
lunr_state = 'DELETED'
self.helper.make_api_request('volumes', self.id, data={
'status': lunr_state})
return callback
def _create_from_backup_cb(self, req, iops):
def callback():
volume = self.helper.volumes.get(self.id)
self.helper.cgroups.set_read_iops(volume, iops['read_iops'])
self.helper.cgroups.set_write_iops(volume, iops['write_iops'])
self.helper.make_api_request('volumes', self.id, data={
'status': 'ACTIVE'})
self.helper.make_api_request(
'backups/%s/restores' % req.params['backup_id'],
self.id, method='DELETE')
account = req.params.get('account')
if account:
cinder = self.helper.get_cinder(account)
cinder.delete_volume_metadata(self.id, 'restore-progress')
return callback
def _validate_source_params(self, req):
source_volume_id = req.params['source_volume_id']
if len(source_volume_id) > 60:
raise HTTPPreconditionFailed(
"length of 'source_volume_id' parameter "
"cannot exceed 60")
try:
source_host = req.params['source_host']
except KeyError:
raise HTTPBadRequest("Must specify source_host")
try:
source_port = req.params['source_port']
except KeyError:
raise HTTPBadRequest("Must specify source_port")
return {
'id': source_volume_id,
'host': source_host,
'port': source_port,
}
@lock("volumes/%(id)s/resource")
def create(self, req, lock):
if len(self.id) > 94:
raise HTTPPreconditionFailed(
"length of volume id cannot exceed 94")
if '.' in self.id:
raise HTTPPreconditionFailed("volume id cannot contain '.'")
params = {'lock': lock}
params['size'] = self._validate_size(req)
iops = self._validate_iops(req)
# Create from backup.
if req.params.get('backup_id'):
params.update(self._validate_backup_params(req))
params['callback'] = self._create_from_backup_cb(req, iops)
account = req.params.get('account')
if account:
params['cinder'] = self.helper.get_cinder(account)
try:
self.helper.volumes.create(self.id, **params)
except AlreadyExists, e:
raise HTTPConflict(str(e))
volume = self.helper.volumes.get(self.id)
volume['status'] = 'BUILDING'
# Create a clone
elif req.params.get('source_volume_id'):
source = self._validate_source_params(req)
# FIXME. Setting cgroups here would be silly, since we
# want a fast clone. How do we set them later?
# def callback():
# pass
# params['callback'] = callback
try:
self.helper.volumes.create(self.id, **params)
except AlreadyExists, e:
raise HTTPConflict(str(e))
volume = self.helper.volumes.get(self.id)
logger.debug('Created new volume %s to be clone of %s'
% (volume['id'], source['id']))
logger.debug('Creating export of new volume %s' % volume['id'])
try:
export = self.helper.exports.create(volume['id'])
except ServiceUnavailable:
self.helper.volumes.delete(volume['id'], lock=lock)
raise
# Tell other node to clone!
path = '/volumes/%s/clones/%s' % (source['id'], volume['id'])
node_params = {
'account': req.params.get('account', ''),
'iqn': export['name'],
'iscsi_ip': self.helper.storage_host,
'iscsi_port': self.helper.storage_port,
# First dirty method to close the export.
'mgmt_host': self.helper.management_host,
'mgmt_port': self.helper.management_port,
'cinder_host': self.helper.cinder_host,
}
try:
self.helper.node_request(source['host'], source['port'],
'PUT', path, **node_params)
except NodeError, e:
logger.error('Clone node request failed: %s' % e)
self.helper.exports.delete(volume['id'])
self.helper.volumes.delete(volume['id'], lock=lock)
raise
volume['status'] = 'CLONING'
# Create from image
elif req.params.get('image_id'):
image_id = params['image_id'] = req.params['image_id']
account = params['account'] = req.params.get('account')
params['callback'] = self._create_from_image_cb(req, iops)
params['scrub_callback'] = self._post_scrub_cb()
try:
self.helper.volumes.create(self.id, **params)
except InvalidImage, e:
logger.error("InvalidImage: %s" % e)
raise HTTPPreconditionFailed("Invalid image: %s" % image_id)
except AlreadyExists:
raise HTTPConflict("Volume named '%s' already exists" %
self.id)
volume = self.helper.volumes.get(self.id)
volume['status'] = 'IMAGING'
else:
# create raw volume
try:
self.helper.volumes.create(self.id, **params)
except AlreadyExists:
raise HTTPConflict("Volume named '%s' already exists" %
self.id)
volume = self.helper.volumes.get(self.id)
self.helper.cgroups.set_read_iops(volume, iops['read_iops'])
self.helper.cgroups.set_write_iops(volume, iops['write_iops'])
volume['status'] = 'ACTIVE'
return Response(volume)
@lock("volumes/%(id)s/resource")
def delete(self, req, lock):
try:
volume = self.helper.volumes.get(self.id)
except NotFound:
raise HTTPNotFound("Cannot delete non-existant volume '%s'" %
self.id)
try:
# delete export in try block to avoid race
out = self.helper.exports.delete(self.id)
except NotFound:
# Might have recieved a duplicate delete request,
# but are still in the process of deletion
logger.debug("Requested deletion of '%s' but no export was "
"found" % self.id)
except ResourceBusy:
raise HTTPConflict("Cannot delete '%s' while export in "
"use" % self.id)
def callback():
self.helper.make_api_request('volumes', self.id,
data={'status': 'DELETED'})
# delete volume
try:
self.helper.cgroups.set_read_iops(volume, 0)
self.helper.cgroups.set_write_iops(volume, 0)
out = self.helper.volumes.delete(self.id, callback, lock)
volume['status'] = 'DELETING'
return Response(volume)
except NotFound:
raise HTTPNotFound("No volume named '%s'" % self.id)
@lock("volumes/%(id)s/resource")
def audit(self, req, lock):
# We don't need a real volume to do this, it might be deleted.
volume = {'id': self.id}
callback = None
backup_id = req.params.get('backup_id')
if backup_id:
def cb():
self.helper.make_api_request('backups', backup_id,
data={'status': 'DELETED'})
# Wee bit of pessimism here. This happens in backup.delete,
# but if it fails, we'll retry after audit.
account = req.params.get('account')
if account:
cinder = self.helper.get_cinder(account)
try:
cinder.force_delete('snapshots', backup_id)
except CinderError, e:
if e.code != 404:
raise
callback = cb
self.helper.backups.run_audit(volume, lock=lock, callback=callback)
return Response(volume)
def lock(self, req):
info = inspect(self, req, "volumes/%(id)s/resource")
with ResourceFile(info['lock_file']) as lock:
used = lock.used()
if used:
resp = {'in-use': True}
resp.update(used)
return Response(resp)
pass
return Response({'in-use': False})
| audip/lunr | lunr/storage/controller/volume.py | Python | apache-2.0 | 13,157 |
#
# Copyright © 2012–2022 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from appconf import AppConf
class WeblateConf(AppConf):
"""Machine translation settings."""
# URL of the Apertium APy server
APERTIUM_APY = None
# Amazon Translate settings
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
AWS_REGION = None
    # Microsoft Cognitive Services Translator
MICROSOFT_COGNITIVE_KEY = None
MICROSOFT_BASE_URL = "api.cognitive.microsofttranslator.com"
MICROSOFT_ENDPOINT_URL = "api.cognitive.microsoft.com"
# Microsoft Azure services region identification code
MICROSOFT_REGION = None
# MyMemory identification email, see
# https://mymemory.translated.net/doc/spec.php
MYMEMORY_EMAIL = None
# Optional MyMemory credentials to access private translation memory
MYMEMORY_USER = None
MYMEMORY_KEY = None
# Google API key for Google Translate API
GOOGLE_KEY = None
# Google Translate API3 credentials and project id
GOOGLE_CREDENTIALS = None
GOOGLE_PROJECT = None
GOOGLE_LOCATION = "global"
# ModernMT
MODERNMT_KEY = None
MODERNMT_URL = "https://api.modernmt.com/"
# API key for Yandex Translate API
YANDEX_KEY = None
# tmserver URL
TMSERVER = None
# API key for DeepL API
DEEPL_KEY = None
DEEPL_API_URL = "https://api.deepl.com/v2/"
# API key for LibreTranslate
LIBRETRANSLATE_KEY = None
LIBRETRANSLATE_API_URL = None
# SAP Translation Hub
SAP_BASE_URL = None
SAP_SANDBOX_APIKEY = None
SAP_USERNAME = None
SAP_PASSWORD = None
SAP_USE_MT = True
# Youdao
YOUDAO_ID = None
YOUDAO_SECRET = None
# Netease
NETEASE_KEY = None
NETEASE_SECRET = None
# List of machine translations
SERVICES = (
"weblate.machinery.weblatetm.WeblateTranslation",
"weblate.memory.machine.WeblateMemory",
)
class Meta:
prefix = "MT"
| nijel/weblate | weblate/machinery/models.py | Python | gpl-3.0 | 2,648 |
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^jobs/', include('transparencyjobs.jobs.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^$', 'django.views.generic.simple.redirect_to', {'url': '/jobs/'}),
) | codeforamerica/transparencyjobs | urls.py | Python | bsd-3-clause | 299 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.monitoring_dashboard_v1.types import common
__protobuf__ = proto.module(
package="google.monitoring.dashboard.v1",
manifest={
"SparkChartType",
"TimeSeriesQuery",
"TimeSeriesFilter",
"TimeSeriesFilterRatio",
"Threshold",
},
)
class SparkChartType(proto.Enum):
r"""Defines the possible types of spark chart supported by the
``Scorecard``.
"""
SPARK_CHART_TYPE_UNSPECIFIED = 0
SPARK_LINE = 1
SPARK_BAR = 2
class TimeSeriesQuery(proto.Message):
r"""TimeSeriesQuery collects the set of supported methods for
querying time series data from the Stackdriver metrics API.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
time_series_filter (google.cloud.monitoring_dashboard_v1.types.TimeSeriesFilter):
Filter parameters to fetch time series.
This field is a member of `oneof`_ ``source``.
time_series_filter_ratio (google.cloud.monitoring_dashboard_v1.types.TimeSeriesFilterRatio):
Parameters to fetch a ratio between two time
series filters.
This field is a member of `oneof`_ ``source``.
time_series_query_language (str):
A query used to fetch time series.
This field is a member of `oneof`_ ``source``.
unit_override (str):
The unit of data contained in fetched time series. If
non-empty, this unit will override any unit that accompanies
fetched data. The format is the same as the
```unit`` <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors>`__
field in ``MetricDescriptor``.
"""
time_series_filter = proto.Field(
proto.MESSAGE, number=1, oneof="source", message="TimeSeriesFilter",
)
time_series_filter_ratio = proto.Field(
proto.MESSAGE, number=2, oneof="source", message="TimeSeriesFilterRatio",
)
time_series_query_language = proto.Field(proto.STRING, number=3, oneof="source",)
unit_override = proto.Field(proto.STRING, number=5,)
class TimeSeriesFilter(proto.Message):
r"""A filter that defines a subset of time series data that is displayed
in a widget. Time series data is fetched using the
```ListTimeSeries`` <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list>`__
method.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
filter (str):
Required. The `monitoring
filter <https://cloud.google.com/monitoring/api/v3/filters>`__
that identifies the metric types, resources, and projects to
query.
aggregation (google.cloud.monitoring_dashboard_v1.types.Aggregation):
By default, the raw time series data is
returned. Use this field to combine multiple
time series for different views of the data.
secondary_aggregation (google.cloud.monitoring_dashboard_v1.types.Aggregation):
Apply a second aggregation after ``aggregation`` is applied.
pick_time_series_filter (google.cloud.monitoring_dashboard_v1.types.PickTimeSeriesFilter):
Ranking based time series filter.
This field is a member of `oneof`_ ``output_filter``.
statistical_time_series_filter (google.cloud.monitoring_dashboard_v1.types.StatisticalTimeSeriesFilter):
Statistics based time series filter.
Note: This field is deprecated and completely
ignored by the API.
This field is a member of `oneof`_ ``output_filter``.
"""
filter = proto.Field(proto.STRING, number=1,)
aggregation = proto.Field(proto.MESSAGE, number=2, message=common.Aggregation,)
secondary_aggregation = proto.Field(
proto.MESSAGE, number=3, message=common.Aggregation,
)
pick_time_series_filter = proto.Field(
proto.MESSAGE,
number=4,
oneof="output_filter",
message=common.PickTimeSeriesFilter,
)
statistical_time_series_filter = proto.Field(
proto.MESSAGE,
number=5,
oneof="output_filter",
message=common.StatisticalTimeSeriesFilter,
)
class TimeSeriesFilterRatio(proto.Message):
r"""A pair of time series filters that define a ratio
computation. The output time series is the pair-wise division of
each aligned element from the numerator and denominator time
series.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
numerator (google.cloud.monitoring_dashboard_v1.types.TimeSeriesFilterRatio.RatioPart):
The numerator of the ratio.
denominator (google.cloud.monitoring_dashboard_v1.types.TimeSeriesFilterRatio.RatioPart):
The denominator of the ratio.
secondary_aggregation (google.cloud.monitoring_dashboard_v1.types.Aggregation):
Apply a second aggregation after the ratio is
computed.
pick_time_series_filter (google.cloud.monitoring_dashboard_v1.types.PickTimeSeriesFilter):
Ranking based time series filter.
This field is a member of `oneof`_ ``output_filter``.
statistical_time_series_filter (google.cloud.monitoring_dashboard_v1.types.StatisticalTimeSeriesFilter):
Statistics based time series filter.
Note: This field is deprecated and completely
ignored by the API.
This field is a member of `oneof`_ ``output_filter``.
"""
class RatioPart(proto.Message):
r"""Describes a query to build the numerator or denominator of a
TimeSeriesFilterRatio.
Attributes:
filter (str):
Required. The `monitoring
filter <https://cloud.google.com/monitoring/api/v3/filters>`__
that identifies the metric types, resources, and projects to
query.
aggregation (google.cloud.monitoring_dashboard_v1.types.Aggregation):
By default, the raw time series data is
returned. Use this field to combine multiple
time series for different views of the data.
"""
filter = proto.Field(proto.STRING, number=1,)
aggregation = proto.Field(proto.MESSAGE, number=2, message=common.Aggregation,)
numerator = proto.Field(proto.MESSAGE, number=1, message=RatioPart,)
denominator = proto.Field(proto.MESSAGE, number=2, message=RatioPart,)
secondary_aggregation = proto.Field(
proto.MESSAGE, number=3, message=common.Aggregation,
)
pick_time_series_filter = proto.Field(
proto.MESSAGE,
number=4,
oneof="output_filter",
message=common.PickTimeSeriesFilter,
)
statistical_time_series_filter = proto.Field(
proto.MESSAGE,
number=5,
oneof="output_filter",
message=common.StatisticalTimeSeriesFilter,
)
class Threshold(proto.Message):
r"""Defines a threshold for categorizing time series values.
Attributes:
label (str):
A label for the threshold.
value (float):
The value of the threshold. The value should
be defined in the native scale of the metric.
color (google.cloud.monitoring_dashboard_v1.types.Threshold.Color):
The state color for this threshold. Color is
not allowed in a XyChart.
direction (google.cloud.monitoring_dashboard_v1.types.Threshold.Direction):
The direction for the current threshold.
Direction is not allowed in a XyChart.
"""
class Color(proto.Enum):
r"""The color suggests an interpretation to the viewer when
actual values cross the threshold. Comments on each color
provide UX guidance on how users can be expected to interpret a
given state color.
"""
COLOR_UNSPECIFIED = 0
YELLOW = 4
RED = 6
class Direction(proto.Enum):
r"""Whether the threshold is considered crossed by an actual
value above or below its threshold value.
"""
DIRECTION_UNSPECIFIED = 0
ABOVE = 1
BELOW = 2
label = proto.Field(proto.STRING, number=1,)
value = proto.Field(proto.DOUBLE, number=2,)
color = proto.Field(proto.ENUM, number=3, enum=Color,)
direction = proto.Field(proto.ENUM, number=4, enum=Direction,)
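    # Illustrative only (not part of the generated module): a threshold that
    # flags values above 0.9 in red could be constructed as
    #   Threshold(label="p99 latency", value=0.9,
    #             color=Threshold.Color.RED,
    #             direction=Threshold.Direction.ABOVE)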
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-monitoring-dashboards | google/cloud/monitoring_dashboard_v1/types/metrics.py | Python | apache-2.0 | 9,974 |
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class KLWinesSpider(BaseSpider):
name = 'klwines.com'
allowed_domains = ['www.klwines.com', 'klwines.com']
start_urls = ('http://www.klwines.com/content.asp?N=0&display=500&Nr=OR%28OutofStock%3AN%2CInventory+Location%3ASpecial+Order%29&Ns=p_lotGeneratedFromPOYN|0||p_price', )
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
# categories = hxs.select(u'').extract()
# for url in categories:
# url = urljoin_rfc(get_base_url(response), url)
# yield Request(url)
# pagination
next_page = hxs.select(u'//a[@title="Next Page"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//div[@class="result clearfix"]')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
url = product.select(u'.//div[@class="result-desc"]/a/@href').extract()
name = product.select(u'.//div[@class="result-desc"]/a/text()').extract()
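            # Some results use the auction layout; fall back to its selectors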
if not url:
url = product.select(u'.//div[@class="auctionResult-desc"]/p/a/@href').extract()
name = product.select(u'.//div[@class="auctionResult-desc"]/p/a/text()').extract()
url = urljoin_rfc(get_base_url(response), url[0])
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_xpath('price', u'.//span[@class="price"]/span[@class="global-serif global-pop-color"]/strong/text()')
if loader.get_output_value('price'):
yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/applejack/klwines.py | Python | apache-2.0 | 2,489 |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
import sockeye.constants as C
import sockeye.coverage
import sockeye.rnn_attention
from test.common import gaussian_vector, integer_vector
attention_types = [C.ATT_BILINEAR, C.ATT_DOT, C.ATT_LOC, C.ATT_MLP]
def test_att_bilinear():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_BILINEAR,
num_hidden=None,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=6,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=None)
assert type(attention) == sockeye.rnn_attention.BilinearAttention
assert not attention._input_previous_word
assert attention.num_hidden == 6
def test_att_dot():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_DOT,
num_hidden=2,
input_previous_word=True,
source_num_hidden=4,
query_num_hidden=6,
layer_normalization=False,
config_coverage=None,
is_scaled=False)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=None)
assert type(attention) == sockeye.rnn_attention.DotAttention
assert attention._input_previous_word
assert attention.project_source
assert attention.project_query
assert attention.num_hidden == 2
assert attention.is_scaled is False
assert not attention.scale
def test_att_dot_scaled():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_DOT,
num_hidden=16,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=None,
layer_normalization=False,
config_coverage=None,
is_scaled=True)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=None)
assert type(attention) == sockeye.rnn_attention.DotAttention
assert attention._input_previous_word
assert attention.project_source
assert attention.project_query
assert attention.num_hidden == 16
assert attention.is_scaled is True
assert attention.scale == 0.25
def test_att_mh_dot():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_MH_DOT,
num_hidden=None,
input_previous_word=True,
source_num_hidden=8,
query_num_hidden=None,
layer_normalization=False,
config_coverage=None,
num_heads=2)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=None)
assert type(attention) == sockeye.rnn_attention.MultiHeadDotAttention
assert attention._input_previous_word
assert attention.num_hidden == 8
assert attention.heads == 2
assert attention.num_hidden_per_head == 4
def test_att_fixed():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_FIXED,
num_hidden=None,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=None,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=None)
assert type(attention) == sockeye.rnn_attention.EncoderLastStateAttention
assert attention._input_previous_word
def test_att_loc():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_LOC,
num_hidden=None,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=None,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=10)
assert type(attention) == sockeye.rnn_attention.LocationAttention
assert attention._input_previous_word
assert attention.max_source_seq_len == 10
def test_att_mlp():
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_MLP,
num_hidden=16,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=None,
layer_normalization=True,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=10)
assert type(attention) == sockeye.rnn_attention.MlpAttention
assert attention._input_previous_word
assert attention.attention_num_hidden == 16
assert attention.dynamic_source_num_hidden == 1
assert attention._ln
assert not attention.coverage
def test_att_cov():
config_coverage = sockeye.coverage.CoverageConfig(type='tanh', num_hidden=5, layer_normalization=True)
config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_COV,
num_hidden=16,
input_previous_word=True,
source_num_hidden=None,
query_num_hidden=None,
layer_normalization=True,
config_coverage=config_coverage)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=10)
assert type(attention) == sockeye.rnn_attention.MlpCovAttention
assert attention._input_previous_word
assert attention.attention_num_hidden == 16
assert attention.dynamic_source_num_hidden == 5
assert attention._ln
assert type(attention.coverage) == sockeye.coverage.ActivationCoverage
@pytest.mark.parametrize("attention_type", attention_types)
def test_attention(attention_type,
batch_size=1,
encoder_num_hidden=2,
decoder_num_hidden=2):
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
source_seq_len = 3
config_attention = sockeye.rnn_attention.AttentionConfig(type=attention_type,
num_hidden=2,
input_previous_word=False,
source_num_hidden=2,
query_num_hidden=2,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs])
executor = sym.simple_bind(ctx=mx.cpu(),
source=(batch_size, source_seq_len, encoder_num_hidden),
source_length=(batch_size,),
decoder_state=(batch_size, decoder_num_hidden))
# TODO: test for other inputs (that are not equal at each source position)
executor.arg_dict["source"][:] = np.asarray([[[1., 2.], [1., 2.], [3., 4.]]])
executor.arg_dict["source_length"][:] = np.asarray([2.0])
executor.arg_dict["decoder_state"][:] = np.asarray([[5, 6]])
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
# expecting uniform attention_weights of 0.5: 0.5 * seq1 + 0.5 * seq2
assert np.isclose(context_result, np.asarray([[1., 2.]])).all()
# equal attention to first two and no attention to third
assert np.isclose(attention_prob_result, np.asarray([[0.5, 0.5, 0.]])).all()
coverage_cases = [("gru", 10), ("tanh", 4), ("count", 1), ("sigmoid", 1), ("relu", 30)]
@pytest.mark.parametrize("attention_coverage_type,attention_coverage_num_hidden", coverage_cases)
def test_coverage_attention(attention_coverage_type,
attention_coverage_num_hidden,
batch_size=3,
encoder_num_hidden=2,
decoder_num_hidden=2):
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size, )
source_length = mx.sym.Variable("source_length")
source_seq_len = 10
config_coverage = sockeye.coverage.CoverageConfig(type=attention_coverage_type,
num_hidden=attention_coverage_num_hidden,
layer_normalization=False)
config_attention = sockeye.rnn_attention.AttentionConfig(type="coverage",
num_hidden=5,
input_previous_word=False,
source_num_hidden=encoder_num_hidden,
query_num_hidden=decoder_num_hidden,
layer_normalization=False,
config_coverage=config_coverage)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs, attention_state.dynamic_source])
source_shape = (batch_size, source_seq_len, encoder_num_hidden)
source_length_shape = (batch_size,)
decoder_state_shape = (batch_size, decoder_num_hidden)
executor = sym.simple_bind(ctx=mx.cpu(),
source=source_shape,
source_length=source_length_shape,
decoder_state=decoder_state_shape)
source_length_vector = integer_vector(shape=source_length_shape, max_value=source_seq_len)
executor.arg_dict["source"][:] = gaussian_vector(shape=source_shape)
executor.arg_dict["source_length"][:] = source_length_vector
executor.arg_dict["decoder_state"][:] = gaussian_vector(shape=decoder_state_shape)
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
dynamic_source_result = exec_output[2].asnumpy()
expected_probs = (1. / source_length_vector).reshape((batch_size, 1))
assert context_result.shape == (batch_size, encoder_num_hidden)
assert attention_prob_result.shape == (batch_size, source_seq_len)
assert dynamic_source_result.shape == (batch_size, source_seq_len, attention_coverage_num_hidden)
assert (np.sum(np.isclose(attention_prob_result, expected_probs), axis=1) == source_length_vector).all()
def test_last_state_attention(batch_size=1,
encoder_num_hidden=2):
"""
EncoderLastStateAttention is a bit different from other attention mechanisms as it doesn't take a query argument
and doesn't return a probability distribution over the inputs (aka alignment).
"""
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
source_seq_len = 3
config_attention = sockeye.rnn_attention.AttentionConfig(type="fixed",
num_hidden=0,
input_previous_word=False,
source_num_hidden=2,
query_num_hidden=2,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs])
executor = sym.simple_bind(ctx=mx.cpu(),
source=(batch_size, source_seq_len, encoder_num_hidden),
source_length=(batch_size,))
# TODO: test for other inputs (that are not equal at each source position)
executor.arg_dict["source"][:] = np.asarray([[[1., 2.], [1., 2.], [3., 4.]]])
executor.arg_dict["source_length"][:] = np.asarray([2.0])
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
# expecting attention on last state based on source_length
assert np.isclose(context_result, np.asarray([[1., 2.]])).all()
assert np.isclose(attention_prob_result, np.asarray([[0., 1.0, 0.]])).all()
def test_get_context_and_attention_probs():
source = mx.sym.Variable('source')
source_length = mx.sym.Variable('source_length')
attention_scores = mx.sym.Variable('scores')
context, att_probs = sockeye.rnn_attention.get_context_and_attention_probs(
source,
source_length,
attention_scores,
C.DTYPE_FP32)
sym = mx.sym.Group([context, att_probs])
assert len(sym.list_arguments()) == 3
batch_size, seq_len, num_hidden = 32, 50, 100
# data
source_nd = mx.nd.random_normal(shape=(batch_size, seq_len, num_hidden))
source_length_np = np.random.randint(1, seq_len+1, (batch_size,))
source_length_nd = mx.nd.array(source_length_np)
scores_nd = mx.nd.zeros((batch_size, seq_len, 1))
in_shapes, out_shapes, _ = sym.infer_shape(source=source_nd.shape,
source_length=source_length_nd.shape,
scores=scores_nd.shape)
assert in_shapes == [(batch_size, seq_len, num_hidden), (batch_size, seq_len, 1), (batch_size,)]
assert out_shapes == [(batch_size, num_hidden), (batch_size, seq_len)]
context, probs = sym.eval(source=source_nd,
source_length=source_length_nd,
scores=scores_nd)
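    # With all-zero scores the masked softmax is uniform, so each of the first
    # source_length positions receives probability 1/source_length.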
expected_probs = (1. / source_length_nd).reshape((batch_size, 1)).asnumpy()
assert (np.sum(np.isclose(probs.asnumpy(), expected_probs), axis=1) == source_length_np).all()
| artemsok/sockeye | test/unit/test_attention.py | Python | apache-2.0 | 18,317 |
## \file
## \ingroup tutorial_roofit
## \notebook
## Special pdf's: special decay pdf for B physics with mixing and/or CP violation
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# B-decay with mixing
# -------------------------
# Construct pdf
# -------------------------
# Observable
dt = ROOT.RooRealVar("dt", "dt", -10, 10)
dt.setBins(40)
# Parameters
dm = ROOT.RooRealVar("dm", "delta m(B0)", 0.472)
tau = ROOT.RooRealVar("tau", "tau (B0)", 1.547)
w = ROOT.RooRealVar("w", "flavour mistag rate", 0.1)
dw = ROOT.RooRealVar("dw", "delta mistag rate for B0/B0bar", 0.1)
mixState = ROOT.RooCategory("mixState", "B0/B0bar mixing state")
mixState.defineType("mixed", -1)
mixState.defineType("unmixed", 1)
tagFlav = ROOT.RooCategory("tagFlav", "Flavour of the tagged B0")
tagFlav.defineType("B0", 1)
tagFlav.defineType("B0bar", -1)
# Use delta function resolution model
tm = ROOT.RooTruthModel("tm", "truth model", dt)
# Construct Bdecay with mixing
bmix = ROOT.RooBMixDecay(
"bmix",
"decay",
dt,
mixState,
tagFlav,
tau,
dm,
w,
dw,
tm,
ROOT.RooBMixDecay.DoubleSided)
# Plot pdf in various slices
# ---------------------------------------------------
# Generate some data
data = bmix.generate(ROOT.RooArgSet(dt, mixState, tagFlav), 10000)
# Plot B0 and B0bar tagged data separately
# For all plots below B0 and B0 tagged data will look somewhat differently
# if the flavor tagging mistag rate for B0 and B0 is different (i.e. dw!=0)
frame1 = dt.frame(ROOT.RooFit.Title(
"B decay distribution with mixing (B0/B0bar)"))
data.plotOn(frame1, ROOT.RooFit.Cut("tagFlav==tagFlav::B0"))
bmix.plotOn(frame1, ROOT.RooFit.Slice(tagFlav, "B0"))
data.plotOn(frame1, ROOT.RooFit.Cut("tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(ROOT.kCyan))
bmix.plotOn(frame1, ROOT.RooFit.Slice(tagFlav, "B0bar"),
ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot mixed slice for B0 and B0bar tagged data separately
frame2 = dt.frame(ROOT.RooFit.Title(
"B decay distribution of mixed events (B0/B0bar)"))
data.plotOn(frame2, ROOT.RooFit.Cut(
"mixState==mixState::mixed&&tagFlav==tagFlav::B0"))
bmix.plotOn(frame2, ROOT.RooFit.Slice(tagFlav, "B0"),
ROOT.RooFit.Slice(mixState, "mixed"))
data.plotOn(
frame2,
ROOT.RooFit.Cut("mixState==mixState::mixed&&tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(
ROOT.kCyan))
bmix.plotOn(frame2, ROOT.RooFit.Slice(tagFlav, "B0bar"), ROOT.RooFit.Slice(
mixState, "mixed"), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot unmixed slice for B0 and B0bar tagged data separately
frame3 = dt.frame(ROOT.RooFit.Title(
"B decay distribution of unmixed events (B0/B0bar)"))
data.plotOn(frame3, ROOT.RooFit.Cut(
"mixState==mixState::unmixed&&tagFlav==tagFlav::B0"))
bmix.plotOn(frame3, ROOT.RooFit.Slice(tagFlav, "B0"),
ROOT.RooFit.Slice(mixState, "unmixed"))
data.plotOn(
frame3,
ROOT.RooFit.Cut("mixState==mixState::unmixed&&tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(
ROOT.kCyan))
bmix.plotOn(frame3, ROOT.RooFit.Slice(tagFlav, "B0bar"), ROOT.RooFit.Slice(
mixState, "unmixed"), ROOT.RooFit.LineColor(ROOT.kCyan))
# B-decay with CP violation
# -------------------------
# Construct pdf
# -------------------------
# Additional parameters needed for B decay with CPV
CPeigen = ROOT.RooRealVar("CPeigen", "CP eigen value", -1)
absLambda = ROOT.RooRealVar("absLambda", "|lambda|", 1, 0, 2)
argLambda = ROOT.RooRealVar("argLambda", "Im(lambda)", 0.7, -1, 1)
effR = ROOT.RooRealVar("effR", "B0/B0bar reco efficiency ratio", 1)
# Construct Bdecay with CP violation
bcp = ROOT.RooBCPEffDecay(
"bcp",
"bcp",
dt,
tagFlav,
tau,
dm,
w,
CPeigen,
absLambda,
argLambda,
effR,
dw,
tm,
ROOT.RooBCPEffDecay.DoubleSided)
# Plot scenario 1 - sin(2b)=0.7, |l|=1
# ---------------------------------------------------------------------------
# Generate some data
data2 = bcp.generate(ROOT.RooArgSet(dt, tagFlav), 10000)
# Plot B0 and B0bar tagged data separately
frame4 = dt.frame(ROOT.RooFit.Title(
"B decay distribution with CPV(|l|=1,Im(l)=0.7) (B0/B0bar)"))
data2.plotOn(frame4, ROOT.RooFit.Cut("tagFlav==tagFlav::B0"))
bcp.plotOn(frame4, ROOT.RooFit.Slice(tagFlav, "B0"))
data2.plotOn(frame4, ROOT.RooFit.Cut("tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(ROOT.kCyan))
bcp.plotOn(frame4, ROOT.RooFit.Slice(tagFlav, "B0bar"),
ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot scenario 2 - sin(2b)=0.7, |l|=0.7
# -------------------------------------------------------------------------------
absLambda.setVal(0.7)
# Generate some data
data3 = bcp.generate(ROOT.RooArgSet(dt, tagFlav), 10000)
# Plot B0 and B0bar tagged data separately (sin2b = 0.7 plus direct CPV
# |l|=0.7)
frame5 = dt.frame(ROOT.RooFit.Title(
"B decay distribution with CPV(|l|=0.7,Im(l)=0.7) (B0/B0bar)"))
data3.plotOn(frame5, ROOT.RooFit.Cut("tagFlav==tagFlav::B0"))
bcp.plotOn(frame5, ROOT.RooFit.Slice(tagFlav, "B0"))
data3.plotOn(frame5, ROOT.RooFit.Cut("tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(ROOT.kCyan))
bcp.plotOn(frame5, ROOT.RooFit.Slice(tagFlav, "B0bar"),
ROOT.RooFit.LineColor(ROOT.kCyan))
# Generic B-decay with user coefficients
# -------------------------
# Construct pdf
# -------------------------
# Model parameters
DGbG = ROOT.RooRealVar("DGbG", "DGamma/GammaAvg", 0.5, -1, 1)
Adir = ROOT.RooRealVar("Adir", "-[1-abs(l)**2]/[1+abs(l)**2]", 0)
Amix = ROOT.RooRealVar("Amix", "2Im(l)/[1+abs(l)**2]", 0.7)
Adel = ROOT.RooRealVar("Adel", "2Re(l)/[1+abs(l)**2]", 0.7)
# Derived input parameters for pdf
DG = ROOT.RooFormulaVar("DG", "Delta Gamma", "@1/@0",
ROOT.RooArgList(tau, DGbG))
# Construct coefficient functions for sin,cos, modulations of decay
# distribution
fsin = ROOT.RooFormulaVar(
"fsin", "fsin", "@0*@1*(1-2*@2)", ROOT.RooArgList(Amix, tagFlav, w))
fcos = ROOT.RooFormulaVar(
"fcos", "fcos", "@0*@1*(1-2*@2)", ROOT.RooArgList(Adir, tagFlav, w))
fsinh = ROOT.RooFormulaVar("fsinh", "fsinh", "@0", ROOT.RooArgList(Adel))
# Construct generic B decay pdf using above user coefficients
bcpg = ROOT.RooBDecay("bcpg", "bcpg", dt, tau, DG, ROOT.RooFit.RooConst(
1), fsinh, fcos, fsin, dm, tm, ROOT.RooBDecay.DoubleSided)
# Plot - Im(l)=0.7, e(l)=0.7 |l|=1, G/G=0.5
# -------------------------------------------------------------------------------------
# Generate some data
data4 = bcpg.generate(ROOT.RooArgSet(dt, tagFlav), 10000)
# Plot B0 and B0bar tagged data separately
frame6 = dt.frame(ROOT.RooFit.Title(
"B decay distribution with CPV(Im(l)=0.7,Re(l)=0.7,|l|=1,dG/G=0.5) (B0/B0bar)"))
data4.plotOn(frame6, ROOT.RooFit.Cut("tagFlav==tagFlav::B0"))
bcpg.plotOn(frame6, ROOT.RooFit.Slice(tagFlav, "B0"))
data4.plotOn(frame6, ROOT.RooFit.Cut("tagFlav==tagFlav::B0bar"),
ROOT.RooFit.MarkerColor(ROOT.kCyan))
bcpg.plotOn(frame6, ROOT.RooFit.Slice(tagFlav, "B0bar"),
ROOT.RooFit.LineColor(ROOT.kCyan))
c = ROOT.TCanvas("rf708_bphysics", "rf708_bphysics", 1200, 800)
c.Divide(3, 2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
frame1.GetYaxis().SetTitleOffset(1.6)
frame1.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
frame2.GetYaxis().SetTitleOffset(1.6)
frame2.Draw()
c.cd(3)
ROOT.gPad.SetLeftMargin(0.15)
frame3.GetYaxis().SetTitleOffset(1.6)
frame3.Draw()
c.cd(4)
ROOT.gPad.SetLeftMargin(0.15)
frame4.GetYaxis().SetTitleOffset(1.6)
frame4.Draw()
c.cd(5)
ROOT.gPad.SetLeftMargin(0.15)
frame5.GetYaxis().SetTitleOffset(1.6)
frame5.Draw()
c.cd(6)
ROOT.gPad.SetLeftMargin(0.15)
frame6.GetYaxis().SetTitleOffset(1.6)
frame6.Draw()
c.SaveAs("rf708_bphysics.png")
| root-mirror/root | tutorials/roofit/rf708_bphysics.py | Python | lgpl-2.1 | 7,789 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Convert IPv6 addresses between textual representation and binary.
These functions are missing when python is compiled
without IPv6 support, on Windows for instance.
"""
import socket,struct
def inet_pton(af, addr):
"""Convert an IP address from text representation into binary form"""
if af == socket.AF_INET:
return inet_aton(addr)
elif af == socket.AF_INET6:
# IPv6: The use of "::" indicates one or more groups of 16 bits of zeros.
# We deal with this form of wildcard using a special marker.
JOKER = b"*"
while b"::" in addr:
addr = addr.replace(b"::", b":" + JOKER + b":")
joker_pos = None
# The last part of an IPv6 address can be an IPv4 address
ipv4_addr = None
if b"." in addr:
ipv4_addr = addr.split(b":")[-1]
result = b""
parts = addr.split(b":")
for part in parts:
if part == JOKER:
# Wildcard is only allowed once
if joker_pos is None:
joker_pos = len(result)
else:
raise Exception("Illegal syntax for IP address")
elif part == ipv4_addr: # FIXME: Make sure IPv4 can only be last part
# FIXME: inet_aton allows IPv4 addresses with less than 4 octets
result += socket.inet_aton(ipv4_addr)
else:
# Each part must be 16bit. Add missing zeroes before decoding.
try:
                    result += bytes.fromhex(part.rjust(4, b"0").decode())
                except (TypeError, ValueError):
raise Exception("Illegal syntax for IP address")
# If there's a wildcard, fill up with zeros to reach 128bit (16 bytes)
if JOKER in addr:
result = (result[:joker_pos] + b"\x00" * (16 - len(result))
+ result[joker_pos:])
if len(result) != 16:
raise Exception("Illegal syntax for IP address")
return result
else:
raise Exception("Address family not supported")
def inet_ntop(af, addr):
"""Convert an IP address from binary form into text represenation"""
if af == socket.AF_INET:
return inet_ntoa(addr)
elif af == socket.AF_INET6:
# IPv6 addresses have 128bits (16 bytes)
if len(addr) != 16:
raise Exception("Illegal syntax for IP address")
parts = []
for left in [0, 2, 4, 6, 8, 10, 12, 14]:
try:
value = struct.unpack("!H", addr[left:left+2])[0]
                hexstr = format(value, "x").encode()
except TypeError:
raise Exception("Illegal syntax for IP address")
            parts.append(hexstr.lstrip(b"0").lower())
result = b":".join(parts)
while b":::" in result:
result = result.replace(b":::", b"::")
# Leaving out leading and trailing zeros is only allowed with ::
if result.endswith(b":") and not result.endswith(b"::"):
result = result + b"0"
if result.startswith(b":") and not result.startswith(b"::"):
result = b"0" + result
return result
else:
raise Exception("Address family not supported yet")
| zverevalexei/trex-http-proxy | trex_client/external_libs/scapy-2.3.1/python3/scapy/pton_ntop.py | Python | mit | 3,502 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for the SkCode project.
"""
import os
from setuptools import setup
from skcode import __version__ as skcode_version
# Dump readme content as text
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Setup config
setup(
name='skcode',
version=skcode_version,
author='Fabien Batteix',
author_email='[email protected]',
packages=['skcode', 'skcode.tags', 'skcode.utility'],
scripts=['skterm.py'],
include_package_data=True,
license='GPLv3',
description='SkCode - BBcode parser implementation for Python 3',
long_description=README,
url='https://github.com/TamiaLab/PySkCode',
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Text Processing :: Markup',
'Topic :: Text Processing :: Markup :: HTML',
],
install_requires=['Pygments>=2.0.2'],
tests_require=['nose>=1.3.7', 'coverage>=4.0.3'],
)
| TamiaLab/PySkCode | setup.py | Python | agpl-3.0 | 1,531 |
__author__ = 'Eleonor Bart'
from models import db, User, Role
from main import app
from flask_security import SQLAlchemyUserDatastore, Security
from flask_security.forms import ConfirmRegisterForm
from wtforms import StringField, validators
class ExtendedConfirmRegisterForm(ConfirmRegisterForm):
first_name = StringField('First Name', [validators.DataRequired()])
last_name = StringField('Last Name', [validators.DataRequired()])
# User registration, etc.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore, confirm_register_form=ExtendedConfirmRegisterForm) | ElBell/VTDairyDB | controllers/security.py | Python | gpl-3.0 | 618 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'bugherd',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version = '0.1.dev1',
description = 'Access bugherd.com API',
long_description=long_description,
# The project's main homepage.
url = 'https://github.com/brooksc/bugherd', # use the URL to the github repo
# Author details
author = 'Brooks Cutter',
author_email = '[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# 'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Bug Tracking',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='bugherd',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['requests'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'sample': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
) | brooksc/bugherd | setup.py | Python | mit | 4,028 |
"""A DjangoCMS plugin to build a product page with a 'single page only' layout"""
__version__ = '0.3.0'
| emencia/emencia-product-onepage | product_onepage/__init__.py | Python | mit | 104 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces instances like 'CONCAT(A, B)' with 'CONCAT_WS(MID(CHAR(0), 0, 0), A, B)'
Requirement:
* MySQL
Tested against:
* MySQL 5.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the CONCAT() function
>>> tamper('CONCAT(1,2)')
'CONCAT_WS(MID(CHAR(0),0,0),1,2)'
"""
if payload:
payload = payload.replace("CONCAT(", "CONCAT_WS(MID(CHAR(0),0,0),")
return payload
| pwnieexpress/raspberry_pwn | src/pentest/sqlmap/tamper/concat2concatws.py | Python | gpl-3.0 | 766 |