from i18n import _
from repo import RepoError
import os, sys, atexit, signal, pdb, traceback, socket, errno, shlex, time
import util, commands, hg, lock, fancyopts, revlog, version, extensions, hook
import cmdutil
import ui as _ui
class ParseError(Exception):
"""Exception raised on errors in parsing the command line."""
def run():
"run the command in sys.argv"
sys.exit(dispatch(sys.argv[1:]))
def dispatch(args):
"run the command specified in args"
try:
u = _ui.ui(traceback='--traceback' in args)
except util.Abort, inst:
sys.stderr.write(_("abort: %s\n") % inst)
return -1
return _runcatch(u, args)
def _runcatch(ui, args):
def catchterm(*args):
raise util.SignalInterrupt
for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
num = getattr(signal, name, None)
if num: signal.signal(num, catchterm)
try:
try:
# enter the debugger before command execution
if '--debugger' in args:
pdb.set_trace()
try:
return _dispatch(ui, args)
finally:
ui.flush()
except:
# enter the debugger when we hit an exception
if '--debugger' in args:
pdb.post_mortem(sys.exc_info()[2])
ui.print_exc()
raise
except ParseError, inst:
if inst.args[0]:
ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
commands.help_(ui, inst.args[0])
else:
ui.warn(_("hg: %s\n") % inst.args[1])
commands.help_(ui, 'shortlist')
except cmdutil.AmbiguousCommand, inst:
ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
(inst.args[0], " ".join(inst.args[1])))
except cmdutil.UnknownCommand, inst:
ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
commands.help_(ui, 'shortlist')
except RepoError, inst:
ui.warn(_("abort: %s!\n") % inst)
except lock.LockHeld, inst:
if inst.errno == errno.ETIMEDOUT:
reason = _('timed out waiting for lock held by %s') % inst.locker
else:
reason = _('lock held by %s') % inst.locker
ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
except lock.LockUnavailable, inst:
ui.warn(_("abort: could not lock %s: %s\n") %
(inst.desc or inst.filename, inst.strerror))
except revlog.RevlogError, inst:
ui.warn(_("abort: %s!\n") % inst)
except util.SignalInterrupt:
ui.warn(_("killed!\n"))
except KeyboardInterrupt:
try:
ui.warn(_("interrupted!\n"))
except IOError, inst:
if inst.errno == errno.EPIPE:
if ui.debugflag:
ui.warn(_("\nbroken pipe\n"))
else:
raise
except socket.error, inst:
ui.warn(_("abort: %s\n") % inst[1])
except IOError, inst:
if hasattr(inst, "code"):
ui.warn(_("abort: %s\n") % inst)
elif hasattr(inst, "reason"):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
except: # it might be anything, for example a string
reason = inst.reason
ui.warn(_("abort: error: %s\n") % reason)
elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
if ui.debugflag:
ui.warn(_("broken pipe\n"))
elif getattr(inst, "strerror", None):
if getattr(inst, "filename", None):
ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
else:
raise
except OSError, inst:
if getattr(inst, "filename", None):
ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
except util.UnexpectedOutput, inst:
ui.warn(_("abort: %s") % inst[0])
if not isinstance(inst[1], basestring):
ui.warn(" %r\n" % (inst[1],))
elif not inst[1]:
ui.warn(_(" empty string\n"))
else:
ui.warn("\n%r\n" % util.ellipsis(inst[1]))
except ImportError, inst:
m = str(inst).split()[-1]
ui.warn(_("abort: could not import module %s!\n") % m)
if m in "mpatch bdiff".split():
ui.warn(_("(did you forget to compile extensions?)\n"))
elif m in "zlib".split():
ui.warn(_("(is your Python install correct?)\n"))
except util.Abort, inst:
ui.warn(_("abort: %s\n") % inst)
except MemoryError:
ui.warn(_("abort: out of memory\n"))
except SystemExit, inst:
# Commands shouldn't sys.exit directly, but give a return code.
# Just in case catch this and pass exit code to caller.
return inst.code
except:
ui.warn(_("** unknown exception encountered, details follow\n"))
ui.warn(_("** report bug details to "
"http://www.selenic.com/mercurial/bts\n"))
ui.warn(_("** or [email protected]\n"))
ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
% version.get_version())
raise
return -1
def _findrepo(p):
while not os.path.isdir(os.path.join(p, ".hg")):
oldp, p = p, os.path.dirname(p)
if p == oldp:
return None
return p
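# Illustrative sketch (hypothetical paths): starting from /home/user/repo/src/dir,
# _findrepo walks upward one directory at a time until it finds a directory that
# contains ".hg" (returning, say, "/home/user/repo"); if it reaches the filesystem
# root without finding one, oldp equals p and it returns None.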
def _parse(ui, args):
options = {}
cmdoptions = {}
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except fancyopts.getopt.GetoptError, inst:
raise ParseError(None, inst)
if args:
cmd, args = args[0], args[1:]
aliases, i = cmdutil.findcmd(ui, cmd, commands.table)
cmd = aliases[0]
defaults = ui.config("defaults", cmd)
if defaults:
args = shlex.split(defaults) + args
c = list(i[1])
else:
cmd = None
c = []
# combine global options into local
for o in commands.globalopts:
c.append((o[0], o[1], options[o[1]], o[3]))
try:
args = fancyopts.fancyopts(args, c, cmdoptions)
except fancyopts.getopt.GetoptError, inst:
raise ParseError(cmd, inst)
# separate global options back out
for o in commands.globalopts:
n = o[1]
options[n] = cmdoptions[n]
del cmdoptions[n]
return (cmd, cmd and i[0] or None, args, options, cmdoptions)
def _parseconfig(config):
"""parse the --config options from the command line"""
parsed = []
for cfg in config:
try:
name, value = cfg.split('=', 1)
section, name = name.split('.', 1)
if not section or not name:
raise IndexError
parsed.append((section, name, value))
except (IndexError, ValueError):
raise util.Abort(_('malformed --config option: %s') % cfg)
return parsed
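# Illustrative sketch (made-up values): _parseconfig(["ui.username=alice"]) returns
# [("ui", "username", "alice")]. An entry that lacks the "section.name=value" shape,
# e.g. "username=alice", fails the split on "." and is reported via util.Abort as a
# malformed --config option.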
def _earlygetopt(aliases, args):
"""Return list of values for an option (or aliases).
The values are listed in the order they appear in args.
The options and values are removed from args.
"""
try:
argcount = args.index("--")
except ValueError:
argcount = len(args)
shortopts = [opt for opt in aliases if len(opt) == 2]
values = []
pos = 0
while pos < argcount:
if args[pos] in aliases:
if pos + 1 >= argcount:
# ignore and let getopt report an error if there is no value
break
del args[pos]
values.append(args.pop(pos))
argcount -= 2
elif args[pos][:2] in shortopts:
# short option can have no following space, e.g. hg log -Rfoo
values.append(args.pop(pos)[2:])
argcount -= 1
else:
pos += 1
return values
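# Illustrative sketch (made-up argv): with aliases ["-R", "--repository", "--repo"]
# and args ["-R", "foo", "log"], _earlygetopt returns ["foo"] and mutates args to
# ["log"]; the attached short form ["-Rfoo", "log"] yields the same result because
# "-R" is a two-character alias handled by the shortopts branch.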
_loaded = {}
def _dispatch(ui, args):
# read --config before doing anything else
# (e.g. to change trust settings for reading .hg/hgrc)
config = _earlygetopt(['--config'], args)
if config:
ui.updateopts(config=_parseconfig(config))
# check for cwd
cwd = _earlygetopt(['--cwd'], args)
if cwd:
os.chdir(cwd[-1])
# read the local repository .hgrc into a local ui object
path = _findrepo(os.getcwd()) or ""
if not path:
lui = ui
if path:
try:
lui = _ui.ui(parentui=ui)
lui.readconfig(os.path.join(path, ".hg", "hgrc"))
except IOError:
pass
# now we can expand paths, even ones in .hg/hgrc
rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
if rpath:
path = lui.expandpath(rpath[-1])
lui = _ui.ui(parentui=ui)
lui.readconfig(os.path.join(path, ".hg", "hgrc"))
extensions.loadall(lui)
for name, module in extensions.extensions():
if name in _loaded:
continue
# setup extensions
# TODO this should be generalized to scheme, where extensions can
# redepend on other extensions. then we should toposort them, and
# do initialization in correct order
extsetup = getattr(module, 'extsetup', None)
if extsetup:
extsetup()
cmdtable = getattr(module, 'cmdtable', {})
overrides = [cmd for cmd in cmdtable if cmd in commands.table]
if overrides:
ui.warn(_("extension '%s' overrides commands: %s\n")
% (name, " ".join(overrides)))
commands.table.update(cmdtable)
_loaded[name] = 1
# check for fallback encoding
fallback = lui.config('ui', 'fallbackencoding')
if fallback:
util._fallbackencoding = fallback
fullargs = args
cmd, func, args, options, cmdoptions = _parse(lui, args)
if options["config"]:
raise util.Abort(_("Option --config may not be abbreviated!"))
if options["cwd"]:
raise util.Abort(_("Option --cwd may not be abbreviated!"))
if options["repository"]:
raise util.Abort(_(
"Option -R has to be separated from other options (i.e. not -qR) "
"and --repository may only be abbreviated as --repo!"))
if options["encoding"]:
util._encoding = options["encoding"]
if options["encodingmode"]:
util._encodingmode = options["encodingmode"]
if options["time"]:
def get_times():
t = os.times()
if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
t = (t[0], t[1], t[2], t[3], time.clock())
return t
s = get_times()
def print_time():
t = get_times()
ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
(t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
atexit.register(print_time)
ui.updateopts(options["verbose"], options["debug"], options["quiet"],
not options["noninteractive"], options["traceback"])
if options['help']:
return commands.help_(ui, cmd, options['version'])
elif options['version']:
return commands.version_(ui)
elif not cmd:
return commands.help_(ui, 'shortlist')
repo = None
if cmd not in commands.norepo.split():
try:
repo = hg.repository(ui, path=path)
ui = repo.ui
if not repo.local():
raise util.Abort(_("repository '%s' is not local") % path)
ui.setconfig("bundle", "mainreporoot", repo.root)
except RepoError:
if cmd not in commands.optionalrepo.split():
if args and not path: # try to infer -R from command args
repos = map(_findrepo, args)
guess = repos[0]
if guess and repos.count(guess) == len(repos):
return _dispatch(ui, ['--repository', guess] + fullargs)
if not path:
raise RepoError(_("There is no Mercurial repository here"
" (.hg not found)"))
raise
d = lambda: func(ui, repo, *args, **cmdoptions)
else:
d = lambda: func(ui, *args, **cmdoptions)
# run pre-hook, and abort if it fails
ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
if ret:
return ret
ret = _runcommand(ui, options, cmd, d)
# run post-hook, passing command result
hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
result = ret)
return ret
def _runcommand(ui, options, cmd, cmdfunc):
def checkargs():
try:
return cmdfunc()
except TypeError, inst:
# was this an argument error?
tb = traceback.extract_tb(sys.exc_info()[2])
if len(tb) != 2: # no
raise
raise ParseError(cmd, _("invalid arguments"))
if options['profile']:
import hotshot, hotshot.stats
prof = hotshot.Profile("hg.prof")
try:
try:
return prof.runcall(checkargs)
except:
try:
ui.warn(_('exception raised - generating '
'profile anyway\n'))
except:
pass
raise
finally:
prof.close()
stats = hotshot.stats.load("hg.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
elif options['lsprof']:
try:
from mercurial import lsprof
except ImportError:
raise util.Abort(_(
'lsprof not available - install from '
'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
p = lsprof.Profiler()
p.enable(subcalls=True)
try:
return checkargs()
finally:
p.disable()
stats = lsprof.Stats(p.getstats())
stats.sort()
stats.pprint(top=10, file=sys.stderr, climit=5)
else:
return checkargs()
| {
"content_hash": "df473ebfd18b5041c5de7f400d95d0f2",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 34.702439024390245,
"alnum_prop": 0.5379533314590947,
"repo_name": "carlgao/lenga",
"id": "c449fe74788e9356cda264fad5398c7527f369df",
"size": "14474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/lenny64-peon/usr/share/python-support/mercurial-common/mercurial/dispatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "3281"
},
{
"name": "C#",
"bytes": "97763"
},
{
"name": "CSS",
"bytes": "39383"
},
{
"name": "Emacs Lisp",
"bytes": "6274490"
},
{
"name": "Frege",
"bytes": "463786"
},
{
"name": "IDL",
"bytes": "377510"
},
{
"name": "JavaScript",
"bytes": "1032063"
},
{
"name": "Mathematica",
"bytes": "11862"
},
{
"name": "Perl",
"bytes": "57841501"
},
{
"name": "Prolog",
"bytes": "9867"
},
{
"name": "Python",
"bytes": "10875379"
},
{
"name": "Ruby",
"bytes": "72162"
},
{
"name": "Shell",
"bytes": "22775"
},
{
"name": "Slash",
"bytes": "126702"
},
{
"name": "SystemVerilog",
"bytes": "105693"
},
{
"name": "TeX",
"bytes": "742855"
},
{
"name": "VimL",
"bytes": "1845"
},
{
"name": "XProc",
"bytes": "22962"
},
{
"name": "XSLT",
"bytes": "4075"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.extension import Extension
setup_requires = [
'cython>=0.x',
'pytest-runner',
]
install_requires = [
'ujson',
]
tests_require = [
'hypothesis',
'pytest-benchmark',
'pytest',
]
extensions = [
Extension(
"jsonsubset.deps.xxhash_cython.xxhash",
[
'jsonsubset/deps/xxhash_cython/xxhash.pyx',
'jsonsubset/deps/xxhash_cython/xxHash/xxhash.c'
],
include_dirs = ["jsonsubset/deps/xxhash_cython/xxHash"],
extra_compile_args=["-O3"]
),
Extension(
"jsonsubset.subset",
['jsonsubset/subset.pyx'],
language='c++',
extra_compile_args=["-O3"]
),
Extension(
"jsonsubset.json_parser",
['jsonsubset/json_parser.pyx'],
language='c++',
extra_compile_args=["-O3"]
),
]
setup(
name='jsonsubset',
version='0.1.1',
url='https://github.com/kawmarco/jsonsubset',
description="Extract and parse specific fields from a JSON string ",
author="Marco Kawajiri",
keywords=[
'jsonsubset',
'json',
'select',
],
license="MIT license",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
packages=[
"jsonsubset",
"jsonsubset.deps.xxhash_cython",
],
package_data={
'jsonsubset': ['*.pxd'],
"jsonsubset.deps.xxhash_cython": ['*.pxd'],
},
package_dir={
'jsonsubset': 'jsonsubset',
'jsonsubset.deps.xxhash_cython': 'jsonsubset/deps/xxhash_cython',
},
ext_modules=extensions,
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
)
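# Build/usage sketch (assumed typical workflow, not part of this setup script):
# the .pyx sources above need Cython at build time, so something like
#   pip install cython
#   python setup.py build_ext --inplace
# compiles jsonsubset.subset, jsonsubset.json_parser and the bundled xxhash
# extension in place, after which `import jsonsubset` picks up the built modules.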
| {
"content_hash": "0e00b7c105dac612287ba90a6ade0ab0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 25.129411764705882,
"alnum_prop": 0.5735018726591761,
"repo_name": "kawmarco/jsonsubset",
"id": "864117f3ed85ae7e502eb00e771ff7a77f9ba164",
"size": "2136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21658"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
} |
import utils
import favourite
import os
import xbmc
ROOT = utils.ROOT
FILENAME = utils.FILENAME
def getDefaultSearch():
file = os.path.join(ROOT, 'S', FILENAME)
faves = favourite.getFavourites(file)
if len(faves) > 0:
return faves[0]
return None
| {
"content_hash": "4b180d827f328df1e23e8cb11eba4d8f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 45,
"avg_line_length": 16.705882352941178,
"alnum_prop": 0.6619718309859155,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "f7ba430009dcb16688d0bbb90aab884b90e2a706",
"size": "1093",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "plugin.program.super.favourites/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import lief
import sys
import termcolor as tc
import tempfile
import subprocess
import json
import angr
from capstone import *
is_lesser = lambda addr, sect: addr < sect.virtual_address
is_greater = lambda addr, sect: addr > sect.virtual_address + sect.size
def get_elf_class_str(identity_class):
if identity_class == lief.ELF.ELF_CLASS.CLASS32:
return "32"
elif identity_class == lief.ELF.ELF_CLASS.CLASS64:
return "64"
else:
print("ELF_CLASS is NONE, aborting disassembling...")
return None
def get_capstone_arch(machine_type, mode):
if machine_type == lief.ELF.ARCH.ARM:
if mode == "32":
return CS_ARCH_ARM
else:
return CS_ARCH_ARM64
elif machine_type == lief.ELF.ARCH.MIPS:
return CS_ARCH_MIPS
elif machine_type == lief.ELF.ARCH.PPC:
return CS_ARCH_PPC
elif machine_type == lief.ELF.ARCH.i386:
return CS_ARCH_X86
elif machine_type == lief.ELF.ARCH.x86_64:
return CS_ARCH_X86
else:
print("Unsupported architecture, aborting disassembling...")
return None
def get_capstone_mode(arch, mode):
if (arch == CS_ARCH_ARM) or (arch == CS_ARCH_ARM64):
return CS_MODE_ARM
elif arch == CS_ARCH_MIPS:
if mode == "32":
return CS_MODE_MIPS32
else:
return CS_MODE_MIPS64
elif arch == CS_ARCH_PPC:
if mode == "32":
return CS_MODE_32
else:
return CS_MODE_64
elif arch == CS_ARCH_X86:
if mode == "32":
return CS_MODE_32
else:
return CS_MODE_64
else:
raise "Unsupported capstone arch"
def disas(binary, addr, length):
print(tc.colored("<Capstone disassembly>", "green"))
mode = get_elf_class_str(binary.header.identity_class)
if mode == None:
return
arch = get_capstone_arch(binary.header.machine_type, mode)
if arch == None:
return
dis_mode = get_capstone_mode(arch, mode)
try:
code = bytes(binary.get_content_from_virtual_address(addr, length))
except lief.not_found as err:
print(err)
return
asm_code = ""
md = Cs(arch, dis_mode)
for i in md.disasm(code, addr):
asm_code += "0x{0}:\t{1}\t{2}\n".format(i.address, i.mnemonic, i.op_str)
return asm_code
def check_entrypoint(binary):
entrypoint = binary.header.entrypoint
section = binary.section_from_virtual_address(entrypoint)
print(tc.colored("Entrypoint at virtual address: {}".format(hex(entrypoint)), "green"))
print("{0} {1} {2}".format(tc.colored("Section:","green"),
tc.colored(section.name, "red"),
tc.colored("contains the entrypoint", "green")))
if is_lesser(entrypoint, section) or is_greater(entrypoint, section):
print(tc.colored("Suspicious", "red"))
else:
print(tc.colored("OK", "cyan"))
print(disas(binary, entrypoint, 0x30))
print("Done\n")
def check_rwx_sections(binary):
print(tc.colored("Segments with PF_W + PF_X or PF_R + PF_W + PF_X flags", "green"))
# check segments that have PF_W + PF_X or PF_R + PF_W + PF_X
wx_flags = lief.ELF.SEGMENT_FLAGS.W | lief.ELF.SEGMENT_FLAGS.X
rwx_flags = lief.ELF.SEGMENT_FLAGS.R | wx_flags
for seg in binary.segments:
if seg.flags == wx_flags or seg.flags == rwx_flags:
print("{0} {1}".format(tc.colored("Segment:", "cyan"),
tc.colored(str(seg.type).split('.')[1], "red")))
print("Done\n")
def get_register_size(binary):
elf_class = get_elf_class_str(binary.header.identity_class)
if elf_class == "64":
return 8
else:
return 4
def check_ctors_array(binary):
print(tc.colored("Check if .ctors/.fini_array function pointers "
"were (possibly) patched", "green"))
reg_size = get_register_size(binary)
if binary.has_section(".ctors"):
sect = binary.get_section(".ctors")
elif binary.has_section(".init_array"):
sect = binary.get_section(".init_array")
else:
print(tc.colored(".init_array not found", "white"))
return
content = sect.content
for i in range(0, sect.size, reg_size):
addr = int.from_bytes(content[i : i + reg_size], byteorder="little")
if (hex(addr) == ("0x" + "ff" * reg_size)) or (hex(addr) == "0x0"):
continue
print("{0} {1}".format(tc.colored("Checking address: ", "cyan"),
tc.colored(hex(addr), "yellow")), end=' ')
text_sect = binary.get_section(".text")
if is_lesser(addr, text_sect) or is_greater(addr, text_sect):
print("{0}".format(tc.colored("is outside of .text section", "red")))
else:
print("{0}".format(tc.colored("OK", "cyan")))
print("Done\n")
def check_got_and_plt(binary):
print(tc.colored("Check if GOT entries were patched", "green"))
reg_size = get_register_size(binary)
# Analyse only executables and shared libraries
if binary.has_section(".plt"):
plt = binary.get_section(".plt")
print("{0} {1} {2}".format(tc.colored(".plt at", "green"),
tc.colored(hex(plt.virtual_address), "yellow"),
tc.colored(hex(plt.virtual_address + plt.size), "yellow")))
else:
raise lief.not_found
if binary.has_section(".got.plt"):
got_plt = binary.get_section(".got.plt")
else:
print(tc.colored(".got.plt not found", "white"))
return
content = got_plt.content
# ignore first 3 entries in GOT, because they are reserved
for i in range(3 * reg_size, got_plt.size, reg_size):
addr = int.from_bytes(content[i : i + reg_size], byteorder="little")
print("{0} {1}".format(tc.colored("Checking address: ", "cyan"),
tc.colored(hex(addr), "yellow")), end=' ')
if is_lesser(addr, plt) or is_greater(addr, plt):
print("{0}".format(tc.colored("is outside of .plt section", "red")))
for r in binary.pltgot_relocations:
if (r.address == (got_plt.virtual_address + i)) and r.has_symbol:
print("{0} {1} {2}".format(tc.colored(hex(addr), "yellow"),
tc.colored("should point to", "green"),
tc.colored(r.symbol, "yellow")))
break
print(disas(binary, addr, 0x30))
else:
print("{0}".format(tc.colored("OK", "cyan")))
print("Done\n")
# TO DO: pattern match trampolines instead of outputting all prologues
def check_funcs_trampoline(binary, path):
print(tc.colored("Check if function(s) prologue contain a trampoline", "green"))
# if binary has PIE, angr loads by default at 0x400000, so when disassembling we need
# to subtract that base address, otherwise RVA used in capstone will be incorrect.
base_address_delta = 0x400000 if binary.is_pie else 0
proj = angr.Project(path, auto_load_libs=False)
cfg = proj.analyses.CFG()
funcs = {}
for k, v in dict(proj.kb.functions).items():
funcs[v.name] = v.addr - base_address_delta
for fname in binary.imported_functions:
if fname in funcs:
del funcs[fname]
for fname, faddr in funcs.items():
print("{0} @ {1}".format(tc.colored(fname, "cyan"), tc.colored(hex(faddr), "yellow")))
prologue = disas(binary, faddr, 0xA)
mnemonics = ["jmp", "ret", "retf", "retn", "call", "fld", "fistp", "movd"]
if (prologue is not None) and any(mnemonic in prologue for mnemonic in mnemonics):
print(prologue)
else:
print("{0}".format(tc.colored("OK", "cyan")))
print("Done\n")
def check_dynamic_entries(binary):
print(tc.colored("Check dynamic entries injection", "green"))
# Normally NEEDED dynamic entries are consecutive, check for entries that aren't consecutive
last_needed_entry = None
for i, d in enumerate(binary.dynamic_entries, start=1):
if d.tag == lief.ELF.DYNAMIC_TAGS.NEEDED:
if last_needed_entry == None:
last_needed_entry = i
else:
if (i - last_needed_entry) > 1:
print("{0} {1} {2}".format(tc.colored("Suspicious NEEDED entry, index", "green"),
tc.colored(str(i), "red"),
tc.colored(d.name, "red")))
else:
last_needed_entry = i
print("Done\n")
def analyse():
if len(sys.argv) < 2:
print("[USAGE]: {0} <executable>".format(sys.argv[0]))
sys.exit(1)
try:
binary = lief.ELF.parse(sys.argv[1])
except lief.bad_file as err:
print("Error: {0}".format(err))
sys.exit(1)
check_entrypoint(binary)
check_rwx_sections(binary)
check_ctors_array(binary)
check_got_and_plt(binary)
check_funcs_trampoline(binary, sys.argv[1])
check_dynamic_entries(binary)
if __name__ == "__main__":
analyse()
| {
"content_hash": "88e35e3f4f556503be52d10d11455b6c",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 101,
"avg_line_length": 33.236559139784944,
"alnum_prop": 0.5732772565512779,
"repo_name": "ner0x652/RElief",
"id": "6854c070086c7b844bd54fdcd06b00fd08d241db",
"size": "9297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elforensics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16349"
}
],
"symlink_target": ""
} |
import typing
from .. import client_list_jobs
from .. import create_job
if typing.TYPE_CHECKING:
from google.cloud import bigquery
import pytest
def test_client_list_jobs(
capsys: "pytest.CaptureFixture[str]", client: "bigquery.Client"
) -> None:
job = create_job.create_job()
client.cancel_job(job.job_id)
job.cancel()
client_list_jobs.client_list_jobs()
out, err = capsys.readouterr()
assert "Started job: {}".format(job.job_id) in out
assert "Last 10 jobs:" in out
assert "Jobs from the last ten minutes:" in out
assert "Last 10 jobs run by all users:" in out
assert "Last 10 jobs done:" in out
| {
"content_hash": "ab2a36821c15e566f455c8863bb72c6c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 27.291666666666668,
"alnum_prop": 0.6778625954198473,
"repo_name": "googleapis/python-bigquery",
"id": "a2845b7ad478842b8bf77b3ce19b98b18ba994d7",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/tests/test_client_list_jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
} |
import json
from mockito import *
import os
import shutil
import tempfile
import unittest
from ice.history import ManagedROMArchive
class ManagedROMArchiveTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.temppath = os.path.join(self.tempdir, "tempfile")
self.mock_user = mock()
self.mock_user.user_id = 1234
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_previous_managed_ids_returns_none_for_missing_file(self):
missing_path = os.path.join("some", "stupid", "path")
self.assertFalse(os.path.exists(missing_path))
archive = ManagedROMArchive(missing_path)
self.assertIsNone(archive.previous_managed_ids(self.mock_user))
def test_previous_managed_ids_raises_exception_for_malformed_json(self):
with open(self.temppath, "w+") as f:
f.write("notrealjson")
with self.assertRaises(ValueError):
archive = ManagedROMArchive(self.temppath)
def test_previous_managed_ids_returns_empty_list_for_missing_user(self):
data = {
"1337": []
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), [])
def test_previous_managed_ids_returns_list_from_json(self):
data = {
"1234": [
"1234567890",
"0987654321",
]
}
with open(self.temppath, "w+") as f:
f.write(json.dumps(data))
archive = ManagedROMArchive(self.temppath)
self.assertEquals(archive.previous_managed_ids(self.mock_user), ["1234567890","0987654321"])
def test_set_managed_ids_creates_new_file_if_needed(self):
self.assertFalse(os.path.exists(self.temppath))
archive = ManagedROMArchive(self.temppath)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertTrue(os.path.exists(self.temppath))
def test_previous_managed_ids_returns_new_value_after_set_managed_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
self.assertEqual(archive.previous_managed_ids(self.mock_user), new_ids)
def test_creating_new_archive_after_set_managed_ids_uses_new_ids(self):
archive = ManagedROMArchive(self.temppath)
new_ids = ["1234567890"]
self.assertNotEqual(archive.previous_managed_ids(self.mock_user), new_ids)
archive.set_managed_ids(self.mock_user, ["1234567890"])
new_archive = ManagedROMArchive(self.temppath)
self.assertEqual(new_archive.previous_managed_ids(self.mock_user), new_ids)
| {
"content_hash": "164f32f0683672d679152bc78e0023fa",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 96,
"avg_line_length": 33.5875,
"alnum_prop": 0.7097134350576851,
"repo_name": "scottrice/Ice",
"id": "3968c38b1495492756061f5192d7eedb97167416",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/managed_rom_archive_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140642"
}
],
"symlink_target": ""
} |
"""
Although it is usually not a good idea to explicitly point to a single
ttf file for a font instance, you can do so using the
font_manager.FontProperties fname argument (for a more flexible
solution, see the font_family_rc.py and fonts_demo.py examples).
"""
import sys
import os
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
if sys.platform == 'win32':
fpath = 'C:\\Windows\\Fonts\\Tahoma.ttf'
elif sys.platform.startswith('linux'):
basedir = '/usr/share/fonts/truetype'
fonts = ['freefont/FreeSansBoldOblique.ttf',
'ttf-liberation/LiberationSans-BoldItalic.ttf',
'msttcorefonts/Comic_Sans_MS.ttf']
for fpath in fonts:
if os.path.exists(os.path.join(basedir, fpath)):
break
else:
fpath = '/Library/Fonts/Tahoma.ttf'
if os.path.exists(fpath):
prop = fm.FontProperties(fname=fpath)
fname = os.path.split(fpath)[1]
ax.set_title('this is a special font: %s' % fname, fontproperties=prop)
else:
ax.set_title('Demo fails--cannot find a demo font')
ax.set_xlabel('This is the default font')
plt.show()
| {
"content_hash": "2d2310d499ae0f416677f86e18ecaa52",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 31.16216216216216,
"alnum_prop": 0.6903729401561145,
"repo_name": "bundgus/python-playground",
"id": "9f321afeded4fba4cd86c7e489d6e19889556c60",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matplotlib-playground/examples/api/font_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
} |
from p2ner.abstract.pipeelement import PipeElement
import sys, socket
from twisted.internet import reactor, defer
from twisted.internet.protocol import DatagramProtocol
import time
from random import uniform
class UDPPortElement(PipeElement, DatagramProtocol):
def initElement(self, port=50000, interface='', to='port', **kwargs):
self.port = port
self.exPort=port
self.interface = interface
self.to = to
self.log.info('UDPPortElement component loaded')
self.controlBW=0
def getExPort(self,d):
return self.exPort
def getPort(self):
return self.port
def setPort(self,port):
self.port=port
def listen(self, d):
if "listener" in self:
return
self.listener = reactor.listenUDP(self.port, self) #, interface=self.interface)
self.log.info('start listening to port:%d',self.port)
print 'listening to port ',self.port
if sys.platform == 'win32':
sockhandler = self.listener.getHandle()
sockhandler.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 131071)
sockhandler.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 131071)
def datagramReceived(self, data, (host, port)):
recTime = time.time()
d = self.forwardprev("receive", (host, port), recTime)
#reactor.callLater(0, d.callback, data)
d.callback(data)
def send(self, res, msg, data, peer):
to=self.to
useLocalIp=False
try: #for the server
if self.root.netChecker.nat and peer.ip==self.root.netChecker.externalIp:
useLocalIp=True
peer.useLocalIp=True
except:
pass
if peer.useLocalIp:
ip=peer.lip
to='l'+to
else:
ip=peer.ip
if peer.natType==3 and getattr(peer,'nat'+to):
to='nat'+to
#print 'send to:',ip,to,getattr(peer, to)
if isinstance(res, (list, tuple)):
for r in res:
self.sockwrite(r, ip, getattr(peer, to))
else:
self.sockwrite(res, ip, getattr(peer, to))
return res
def sockwrite(self, data, host, port):
if len(data):
self.controlBW +=len(data)
self.listener.write(data, (host, port))
return data
def cleanUp(self, d=None):
if "listener" in self:
self.listener.stopListening()
def doStop(self, d=None):
self.log.debug('stop listening to port %d',self.port)
print self.port
self.listener.stopListening()
def getStats(self):
ret=self.controlBW
self.controlBW=0
return [(-1,'controlBW',ret)]
| {
"content_hash": "b622a9626c6310735eef7c8722e86d72",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 87,
"avg_line_length": 28.329896907216494,
"alnum_prop": 0.5931586608442504,
"repo_name": "schristakidis/p2ner",
"id": "5a195539588cb72f8a8832950e3fdc7a856171ef",
"size": "3391",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "p2ner/components/pipeelement/udpportelement/udpportelement/udpport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1319300"
}
],
"symlink_target": ""
} |
from .models import Task, get_repeating_data
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import View
import datetime
import json
class GetJson(View):
def get(self, request, *args, **kwargs):
date = kwargs.get("date")
todo_tasks = get_repeating_data(date)
return HttpResponse(json.dumps(todo_tasks), content_type="application/json")
class Snooze(View):
def patch(self, request, *args, **kwargs):
task = get_object_or_404(Task, pk=kwargs["task_id"])
days = int(kwargs["days"])
if days < 0:
task.show_immediately = True
task.snooze = None
task.save()
else:
task.snooze_by(days)
return HttpResponse("ok")
class Done(View):
def patch(self, request, *args, **kwargs):
task = get_object_or_404(Task, pk=kwargs["task_id"])
if task.time_since_completion() and task.time_since_completion() < datetime.timedelta(minutes=5):
return HttpResponse("too_soon")
task.completed()
return HttpResponse("ok")
| {
"content_hash": "c9707d6eccf86fab38d109c6e627cff8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 105,
"avg_line_length": 29.923076923076923,
"alnum_prop": 0.6392459297343616,
"repo_name": "ojarva/home-info-display",
"id": "895266ade16bef60cef6d604b156143b0e130be2",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homedisplay/repeating_tasks/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22171"
},
{
"name": "CoffeeScript",
"bytes": "115283"
},
{
"name": "HTML",
"bytes": "51598"
},
{
"name": "JavaScript",
"bytes": "9902"
},
{
"name": "Python",
"bytes": "310675"
},
{
"name": "Shell",
"bytes": "1617"
}
],
"symlink_target": ""
} |
from symbol.builder import FasterRcnn as Detector
from symbol.builder import add_anchor_to_arg
from models.efficientnet.builder import EfficientNetB5FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRpnHead as RpnHead
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 8 if is_train else 1
fp16 = True
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="localbn", ndev=len(KvstoreParam.gpus))
# normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (4,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
image_anchor = 256
max_side = 700
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
total_image = 82783 + 35504
else:
image_set = ("coco_val2017", )
total_image = 5000
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head)
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = True
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = None
epoch = 0
fixed_param = []
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 1e-4
clip_gradient = None
class schedule:
mult = 12
begin_epoch = 0
end_epoch = 6 * mult
if mult <= 2:
lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
else:
# follow the setting in Rethinking ImageNet Pre-training
# reduce the lr in the last 60k and 20k iterations
lr_iter = [(DatasetParam.total_image * 2 // 16 * end_epoch - 70000) * 16 //
(len(KvstoreParam.gpus) * KvstoreParam.batch_image),
(DatasetParam.total_image * 2 // 16 * end_epoch - 30000) * 16 //
(len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
# data processing
class ResizeParam:
short = 400
long = 600
class PadParam:
short = 400
long = 600
max_num_gt = 100
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (100, 50, 25, 13, 7)
self.long = (150, 75, 38, 19, 10)
scales = (4)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox", "im_info"]
if not RpnParam.nnvm_rpn_target:
transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| {
"content_hash": "43349ca273723b406fd91681cd470345",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 100,
"avg_line_length": 29.706070287539937,
"alnum_prop": 0.5458163045816304,
"repo_name": "TuSimple/simpledet",
"id": "4888e68691587fb5ae97a8719c3bbedb347b6c76",
"size": "9298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/efficientnet/efficientnet_b5_fpn_bn_scratch_400_12x.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "471944"
},
{
"name": "Cuda",
"bytes": "212680"
},
{
"name": "Makefile",
"bytes": "153"
},
{
"name": "Python",
"bytes": "1567733"
},
{
"name": "Shell",
"bytes": "5501"
}
],
"symlink_target": ""
} |
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| {
"content_hash": "504bfa290003c0ed6357e845b77df195",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 79,
"avg_line_length": 40.625,
"alnum_prop": 0.3076923076923077,
"repo_name": "cadithealth/templatealchemy",
"id": "404640c0bc5ba9c8deb9148266880aeeca93ec1d",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatealchemy_driver/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "528"
},
{
"name": "Makefile",
"bytes": "300"
},
{
"name": "Mako",
"bytes": "175"
},
{
"name": "Python",
"bytes": "51030"
},
{
"name": "Ruby",
"bytes": "126"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class SpiritUserAuthConfig(AppConfig):
name = 'spirit.user.auth'
verbose_name = "Spirit User Auth"
label = 'spirit_user_auth'
| {
"content_hash": "d517579235922982c216f03e456c8fd1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 21.875,
"alnum_prop": 0.7142857142857143,
"repo_name": "nitely/Spirit",
"id": "40ea3c84454a305fd9bd57539b933edc48dc9fd9",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/user/auth/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207949"
},
{
"name": "CoffeeScript",
"bytes": "105109"
},
{
"name": "HTML",
"bytes": "171485"
},
{
"name": "JavaScript",
"bytes": "2759"
},
{
"name": "Makefile",
"bytes": "709"
},
{
"name": "Python",
"bytes": "854233"
},
{
"name": "SCSS",
"bytes": "94771"
}
],
"symlink_target": ""
} |
import json
import string
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from djiffy.models import Canvas, Manifest
from sortedm2m.fields import SortedManyToManyField
from unidecode import unidecode
from derrida.common.models import DateRange, Named, Notable
from derrida.common.utils import absolutize_url
from derrida.footnotes.models import Footnote
from derrida.people.models import Person
from derrida.places.models import Place
class WorkCount(models.Model):
'''Mix-in for models related to works; adds work count property and link to
associated works'''
class Meta:
abstract = True
def work_count(self):
'''
Return the number of associated :class:`derrida.books.models.Work` objects for
a given object as an HTML snippet for the Django admin.
'''
base_url = reverse('admin:books_work_changelist')
return mark_safe('<a href="%s?%ss__id__exact=%s">%s</a>' % (
base_url,
self.__class__.__name__.lower(),
self.pk,
self.work_set.count()
))
work_count.short_description = '# works'
# NOTE: possible to use a count field for admin ordering!
# see https://mounirmesselmeni.github.io/2016/03/21/how-to-order-a-calculated-count-field-in-djangos-admin/
# book_count.admin_order_field = 'work__count'
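# Illustrative sketch (hypothetical pk and default admin URL prefix): a Subject with
# pk=3 and two related works renders work_count() roughly as
#   <a href="/admin/books/work/?subjects__id__exact=3">2</a>
# i.e. a clickable count in the admin change list that links to the filtered works.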
class InstanceCount(models.Model):
'''Mix-in for models related to books; adds book count property and link to
associated books'''
class Meta:
abstract = True
def instance_count(self):
'''
Return a count of associated :class:`derrida.books.models.Instance` for
object as an HTML snippet for the Django admin.
'''
base_url = reverse('admin:books_instance_changelist')
return mark_safe('<a href="%s?%ss__id__exact=%s">%s</a>' % (
base_url,
self.__class__.__name__.lower(),
self.pk,
self.instance_set.count()
))
instance_count.short_description = '# instances'
class Subject(Named, Notable, WorkCount):
'''Subject categorization for books'''
#: optional uri
uri = models.URLField(blank=True, null=True)
class Language(Named, Notable, WorkCount, InstanceCount):
'''Language that a book is written in or a language included in a book'''
#: optional uri
uri = models.URLField(blank=True, null=True)
code = models.CharField(blank=True, null=True, max_length=3,
help_text='two or three letter language code from ISO 639')
class Publisher(Named, Notable, InstanceCount):
'''Publisher of a book'''
class OwningInstitution(Named, Notable, InstanceCount):
'''Institution that owns the extant copy of a book'''
#: short name (optional)
short_name = models.CharField(max_length=255, blank=True,
help_text='Optional short name for admin display')
#: contact information
contact_info = models.TextField()
#: :class:`~derrida.places.models.Place`
place = models.ForeignKey(Place)
def __str__(self):
return self.short_name or self.name
class Journal(Named, Notable):
'''List of associated journals for items published as journal articles'''
class Work(Notable):
'''A platonic work. Stores common information about multiple
instances, copies, or editions of the same work. Aggregates one
or more :class:`Instance` objects.'''
#: primary title
primary_title = models.TextField()
#: short title
short_title = models.CharField(max_length=255)
#: original publication date
year = models.IntegerField(blank=True, null=True,
help_text='Original publication date')
# NOTE: this is intended for a generic linked data URI;
# finding aid URL should be tracked on Instance rather than Work
#: optional URI
uri = models.URLField('URI', blank=True, help_text='Linked data URI',
default='')
#: relation to :class:`Person` authors
authors = models.ManyToManyField(Person, blank=True)
#: :class:`Subject` related through :class:`WorkSubject`
subjects = models.ManyToManyField(Subject, through='WorkSubject')
#: :class:`Language` related through :class:`WorkLanguage`
languages = models.ManyToManyField(Language, through='WorkLanguage')
class Meta:
ordering = ['primary_title']
verbose_name = 'Derrida library work'
def __str__(self):
return '%s (%s)' % (self.short_title, self.year or 'n.d.')
def author_names(self):
'''Display author names; convenience access for display in admin'''
# NOTE: possibly might want to use last names here
return ', '.join(str(auth) for auth in self.authors.all())
author_names.short_description = 'Authors'
author_names.admin_order_field = 'authors__authorized_name'
def instance_count(self):
'''
Return count of :class:`derrida.book.models.Instance` associated with
:class:`Work` formatted as an HTML snippet for the Django admin.
'''
base_url = reverse('admin:books_instance_changelist')
return mark_safe('<a href="%s?%ss__id__exact=%s">%s</a>' % (
base_url,
self.__class__.__name__.lower(),
self.pk,
self.instance_set.count()
))
instance_count.short_description = '# instances'
class InstanceQuerySet(models.QuerySet):
'''Custom :class:`~django.db.models.QuerySet` for :class:`Instance` to
make it easy to find all instances that have a digital
edition'''
def with_digital_eds(self):
'''
Return :class:`derrida.books.models.Instance` queryset filtered by
having a digital edition.
'''
return self.exclude(digital_edition__isnull=True)
class Instance(Notable):
'''A single instance of a :class:`Work` - i.e., a specific copy or edition
or translation. Can also include books that appear as sections
of a collected works.'''
#: :class:`Work` this instance belongs to
work = models.ForeignKey(Work)
#: alternate title (optional)
alternate_title = models.CharField(blank=True, max_length=255)
#: :class:`Publisher` (optional)
publisher = models.ForeignKey(Publisher, blank=True, null=True)
#: publication :class:`~derrida.places.models.Place` (optional, sorted many to many)
pub_place = SortedManyToManyField(Place,
verbose_name='Place(s) of Publication', blank=True)
#: Zotero identifier
zotero_id = models.CharField(max_length=8, default='', blank=True)
# identifying slug for use in get_absolute_url, indexed for speed
slug = models.SlugField(max_length=255,
unique=True,
help_text=(
'To auto-generate a valid slug for a new '
'instance, choose a work then click '
'"Save and Continue Editing" in the lower '
'right. Editing slugs of previously saved '
'instances should be done with caution, '
'as this may break permanent links.'
),
blank=True
)
#: item is extant
is_extant = models.BooleanField(help_text='Extant in PUL JD', default=False)
#: item is annotated
is_annotated = models.BooleanField(default=False)
#: item is translated
is_translation = models.BooleanField(default=False)
#: description of item dimensions (optional)
dimensions = models.CharField(max_length=255, blank=True)
#: copyright year
copyright_year = models.PositiveIntegerField(blank=True, null=True)
#: related :class:`Journal` for a journal article
journal = models.ForeignKey(Journal, blank=True, null=True)
print_date_help_text = 'Date as YYYY-MM-DD, YYYY-MM, or YYYY format. Use' \
+ ' print date day/month/year known flags to indicate' \
+ ' that the information is not known.'
#: print date
print_date = models.DateField('Print Date',
blank=True, null=True, help_text=print_date_help_text)
#: print date day is known
print_date_day_known = models.BooleanField(default=False)
#: print date month is known
print_date_month_known = models.BooleanField(default=False)
#: print date year is known
print_date_year_known = models.BooleanField(default=True)
#: finding aid URL
uri = models.URLField('URI', blank=True, default='',
help_text='Finding Aid URL for items in PUL Derrida Library')
# item has a dedication
has_dedication = models.BooleanField(default=False)
# item has insertions
has_insertions = models.BooleanField(default=False)
# page range: using character fields to support non-numeric pages, e.g.
# roman numerals for introductory pages; using two fields to support
# sorting within a volume of collected works.
#: start page for book section or journal article
start_page = models.CharField(max_length=20, blank=True, null=True)
#: end page for book section or journal article
end_page = models.CharField(max_length=20, blank=True, null=True)
#: optional label to distinguish multiple copies of the same work
copy = models.CharField(max_length=1, blank=True,
help_text='Label to distinguish multiple copies of the same edition',
validators=[RegexValidator(r'[A-Z]',
message='Please set a capital letter from A-Z.'
)],
)
#: :class:`Language` this item is written in;
# uses :class:`InstanceLanguage` to indicate primary language
languages = models.ManyToManyField(Language, through='InstanceLanguage')
#: :class:`Instance` that collects this item, for book section
collected_in = models.ForeignKey('self', related_name='collected_set',
on_delete=models.SET_NULL, blank=True, null=True,
help_text='Larger work instance that collects or includes this item')
# work instances are connected to owning institutions via the Catalogue
# model; mapping as a many-to-many with a through
# model in case we want to access owning instutions directly
#: :class:`OwningInstitution`; connected through :class:`InstanceCatalogue`
owning_institutions = models.ManyToManyField(OwningInstitution,
through='InstanceCatalogue')
#: :class:`DerridaWork` this item is cited in
cited_in = models.ManyToManyField('DerridaWork',
help_text='Derrida works that cite this edition or instance',
blank=True)
#: digital edition via IIIF as instance of :class:`djiffy.models.Manifest`
digital_edition = models.OneToOneField(Manifest, blank=True, null=True,
on_delete=models.SET_NULL,
help_text='Digitized edition of this book, if available')
#: flag to suppress content page images, to comply with copyright
#: owner take-down request
suppress_all_images = models.BooleanField(default=False,
help_text='''Suppress large image display for all annotated pages
in this volume, to comply with copyright take-down requests.
(Overview images, insertions, and thumbnails will still display.)''')
#: specific page images to be suppressed, to comply with copyright
#: owner take-down request
suppressed_images = models.ManyToManyField(Canvas, blank=True,
help_text='''Suppress large image for specific annotated images to comply
with copyright take-down requests.''')
# proof-of-concept generic relation to footnotes
#: generic relation to :class:~`derrida.footnotes.models.Footnote`
footnotes = GenericRelation(Footnote)
objects = InstanceQuerySet.as_manager()
class Meta:
ordering = ['alternate_title', 'work__primary_title'] ## ??
verbose_name = 'Derrida library work instance'
unique_together = (("work", "copyright_year", "copy"),)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.generate_safe_slug()
super(Instance, self).save(*args, **kwargs)
def clean(self):
# Don't allow both journal and collected work
if self.journal and self.collected_in:
raise ValidationError('Cannot belong to both a journal and a collection')
def __str__(self):
return '%s (%s%s)' % (self.display_title(),
self.copyright_year or 'n.d.',
' %s' % self.copy if self.copy else '')
def get_absolute_url(self):
'''URL for this :class:`Instance` on the website.'''
return reverse('books:detail', kwargs={'slug': self.slug})
def get_uri(self):
'''public URI for this instance to be used as an identifier'''
return absolutize_url(reverse('books:instance', args=[self.id]))
def generate_base_slug(self):
'''Generate a slug based on first author, work title, and year.
Not guaranteed to be unique if there are multiple copies of
the same instance/edition of a work.
:rtype str: String in the format ``lastname-title-of-work-year``
'''
# get the first author, if there is one
author = self.work.authors.first()
if author:
# use the last name of the first author
author = author.authorized_name.split(',')[0]
else:
# otherwise, set it to an empty string
author = ''
# truncate the title to first several words of the title
title = ' '.join(self.work.primary_title.split()[:9])
# use copyright year if available, with fallback to work year if
year = self.copyright_year or self.work.year or ''
# # return a slug (not unique for multiple copies of same instance)
return slugify('%s %s %s' % (unidecode(author), unidecode(title), year))
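# Illustrative sketch (made-up catalogue data): an instance of a work by
# "Heidegger, Martin" titled "Sein und Zeit" with copyright_year 1967 gets the
# base slug "heidegger-sein-und-zeit-1967"; unidecode transliterates any accented
# characters before slugify builds the hyphenated form.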
def generate_safe_slug(self):
'''Generate a unique slug. Checks for duplicates and calculates
an appropriate copy letter if needed.
:rtype str: String in the format `lastname-title-of-work-year-copy`
'''
# base slug, without any copy letter
base_slug = self.generate_base_slug()
if self.copy:
slug = '-'.join([base_slug, self.copy])
else:
slug = base_slug
# check for any copies with the same base slug
duplicates = Instance.objects.filter(
slug__icontains=base_slug).order_by('-slug')
# exclude current record if it has already been saved
if self.pk:
duplicates = duplicates.exclude(pk=self.pk)
# any new copies should start with 'B' since 'A' is implicit in already
# saved slug for original
new_copy_letter = 'B'
# check for duplicates
if duplicates.exists():
# get the list of matching slugs
slugs = duplicates.values_list('slug', flat=True)
# if slug with specified copy is already unique, use that without
# further processing
if not slug in slugs:
return slug
# otherwise, calculate the appropriate copy letter to use
# collect copy suffixes from the slugs
# (trailing single uppercase letters only)
letters = [ltr for slug in slugs
for ltr in slug.rsplit('-', 1)[1]
if len(ltr) == 1 and ltr in string.ascii_uppercase]
# if existing copies letters are found, increment from the
# highest one (already sorted properly from queryset return)
if letters:
next_copy = chr(ord(letters[0]) + 1)
else:
# otherwise, default next copy is B (first is assumed to be A)
next_copy = 'B'
slug = '-'.join([base_slug, next_copy])
# also store the new copy letter as instance copy
self.copy = next_copy
return slug
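        # Illustrative sketch (hypothetical values): if
        # 'husserl-lorigine-de-la-geometrie-1962' is already taken, the next
        # saved copy gets 'husserl-lorigine-de-la-geometrie-1962-B', then
        # '-C', and so on; the computed letter is also stored on self.copy.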
def display_title(self):
'''display title - alternate title or work short title'''
return self.alternate_title or self.work.short_title or '[no title]'
display_title.short_description = 'Title'
def is_digitized(self):
'''boolean indicator if there is an associated digital edition'''
return bool(self.digital_edition) or \
bool(self.collected_in and self.collected_in.digital_edition)
    # technically sorts on the foreign key, but that effectively filters
    # instances with/without digital editions
is_digitized.admin_order_field = 'digital_edition'
is_digitized.boolean = True
def primary_language(self):
        '''Primary :class:`Language` for this work instance. Uses the
        instance's only language, or its primary language when there is
        more than one; falls back to the only or primary language of the
        associated work.'''
langs = self.languages.all()
# if instance has only one language, use that
# (whether or not marked as primary)
if langs.exists():
# if more than one, filter to just primary
if langs.count() > 1:
langs = langs.filter(instancelanguage__is_primary=True)
# otherwise, return language for the work
if not langs and self.work.languages.exists():
langs = self.work.languages.all()
# filter by primary if more than one
if langs.count() > 1:
langs = langs.filter(worklanguage__is_primary=True)
if langs:
return langs.first()
@property
def location(self):
'''Location in Derrida's library (currently only available for
digitized books).'''
# NOTE: PUL digital editions from the Finding Aid include the
# location in the item title
if self.is_digitized():
# Split manifest label on dashes; at most we want the first two
location_parts = self.digital_edition.label.split(' - ')[:2]
# some volumes include a "Gift Books" notation we don't care about
if location_parts[-1].startswith('Gift Books'):
location_parts = location_parts[:-1]
return ', '.join(location_parts)
@property
def item_type(self):
'''item type: book, book section, or journal article'''
if self.journal:
return 'Journal Article'
if self.collected_in:
return 'Book Section'
return 'Book'
def author_names(self):
'''Display Work author names; convenience access for display in admin'''
return self.work.author_names()
author_names.short_description = 'Authors'
author_names.admin_order_field = 'work__authors__authorized_name'
def catalogue_call_numbers(self):
'''Convenience access to catalogue call numbers, for display in admin'''
return ', '.join([c.call_number for c in self.instancecatalogue_set.all()
if c.call_number])
catalogue_call_numbers.short_description = 'Call Numbers'
catalogue_call_numbers.admin_order_field = 'catalogue__call_number'
def print_year(self):
'''Year from :attr:`print_date` if year is known'''
if self.print_date and self.print_date_year_known:
return self.print_date.year
@property
def year(self):
'''year for indexing and display; :attr:`print_date` if known,
otherwise :attr:`copyright_year`'''
return self.print_year() or self.copyright_year
def images(self):
'''Queryset containing all :class:`djiffy.models.Canvas` objects
associated with the digital edition for this item.'''
if self.digital_edition:
return self.digital_edition.canvases.all()
return Canvas.objects.none()
#: terms in an image label that indicate a canvas should be
#: considered an overview image (e.g., cover & outside views)
overview_labels = ['cover', 'spine', 'back', 'edge', 'view']
def overview_images(self):
'''Overview images for this book - cover, spine, etc.
Filtered based on canvas label naming conventions.'''
label_query = models.Q()
for overview_label in self.overview_labels:
label_query |= models.Q(label__icontains=overview_label)
return self.images().filter(label_query) \
.exclude(label__icontains='insertion')
def annotated_pages(self):
'''Annotated pages for this book. Filtered based on the presence
of a documented :class:`~derrida.interventions.models.Intervention`
in the database.'''
return self.images().filter(intervention__isnull=False).distinct()
def insertion_images(self):
'''Insertion images for this book.
Filtered based on canvas label naming conventions.'''
# NOTE: using Insertion because of possible case-sensitive
# search on mysql even when icontains is used
return self.images().filter(label__icontains='Insertion')
@classmethod
def allow_canvas_detail(cls, canvas):
'''Check if canvas detail view is allowed. Allows insertion images,
overview images, and pages with documented interventions.'''
return any([
'insertion' in canvas.label.lower(),
any(label in canvas.label.lower()
for label in cls.overview_labels),
canvas.intervention_set.exists()
])
def allow_canvas_large_image(self, canvas):
'''Check if canvas large image view is allowed. Always allows
insertion images and overview images; other pages with documented
interventions are allowed as long as they are not suppressed,
either via :attr:`suppress_all_images` or specific
:attr:`suppressed_images`.'''
# insertion & overview always allowed
if any(['insertion' in canvas.label.lower(),
any(label in canvas.label.lower()
for label in self.overview_labels)]):
# allow
return True
# if all other images are suppressed, deny without checking further
if self.suppress_all_images:
return False
# if image has interventions, check if it is suppressed
if canvas.intervention_set.exists():
# deny if suppressed, otherwise allow
return canvas not in self.suppressed_images.all()
@property
def related_instances(self):
        '''Find related works; for now, this means works by the
        same author. For a work that collects items, include
        works by any book section authors.'''
authors = list(self.work.authors.all())
if self.collected_set.exists():
for instance in self.collected_set.all():
authors.extend(instance.work.authors.all())
return Instance.objects.filter(work__authors__in=authors) \
.exclude(pk=self.pk) \
.exclude(digital_edition__isnull=True)
class WorkSubject(Notable):
'''Through-model for work-subject relationship, to allow designating
a particular subject as primary or adding notes.'''
#: :class:`Subject`
subject = models.ForeignKey(Subject)
#: :class:`Work`
work = models.ForeignKey(Work)
#: boolean flag indicating if this subject is primary for this work
is_primary = models.BooleanField(default=False)
class Meta:
unique_together = ('subject', 'work')
verbose_name = 'Subject'
def __str__(self):
return '%s %s%s' % (self.work, self.subject,
' (primary)' if self.is_primary else '')
class WorkLanguage(Notable):
'''Through-model for work-language relationship, to allow designating
one language as primary or adding notes.'''
#: :class:`Language`
language = models.ForeignKey(Language)
#: :class:`Work`
work = models.ForeignKey(Work)
#: boolean flag indicating if this language is primary for this work
is_primary = models.BooleanField()
class Meta:
unique_together = ('work', 'language')
verbose_name = 'Language'
def __str__(self):
return '%s %s%s' % (self.work, self.language,
' (primary)' if self.is_primary else '')
class InstanceLanguage(Notable):
'''Through-model for instance-language relationship, to allow designating
one language as primary or adding notes.'''
#: :class:`Language`
language = models.ForeignKey(Language)
#: :class:`Instance`
instance = models.ForeignKey(Instance)
#: boolean flag indicating if this language is primary for this instance
is_primary = models.BooleanField()
class Meta:
unique_together = ('instance', 'language')
verbose_name = 'Language'
def __str__(self):
return '%s %s%s' % (self.instance, self.language,
' (primary)' if self.is_primary else '')
class InstanceCatalogue(Notable, DateRange):
    '''Location of a work instance in the real world, associating it with an
    owning institution.'''
institution = models.ForeignKey(OwningInstitution)
instance = models.ForeignKey(Instance)
is_current = models.BooleanField()
    # using a char field instead of an int because call numbers may contain
    # strings as well as numbers
call_number = models.CharField(max_length=255, blank=True, null=True,
help_text='Used for Derrida shelf mark')
class Meta:
verbose_name = 'Catalogue'
def __str__(self):
dates = ''
if self.dates:
dates = ' (%s)' % self.dates
return '%s / %s%s' % (self.instance, self.institution, dates)
class CreatorType(Named, Notable):
'''Type of creator role a person can have to a book - author,
editor, translator, etc.'''
uri = models.URLField(blank=True, null=True)
class InstanceCreator(Notable):
creator_type = models.ForeignKey(CreatorType)
# technically should disallow author here, but can clean that up later
person = models.ForeignKey(Person)
instance = models.ForeignKey(Instance)
def __str__(self):
return '%s %s %s' % (self.person, self.creator_type, self.instance)
class PersonBookRelationshipType(Named, Notable):
    '''Type of non-annotation relationship associating a person
    with a book.'''
uri = models.URLField(blank=True, null=True)
class PersonBook(Notable, DateRange):
'''Interactions or connections between books and people other than
annotation.'''
person = models.ForeignKey(Person)
book = models.ForeignKey(Instance)
relationship_type = models.ForeignKey(PersonBookRelationshipType)
class Meta:
verbose_name = 'Person/Book Interaction'
def __str__(self):
dates = ''
if self.dates:
dates = ' (%s)' % self.dates
return '%s - %s%s' % (self.person, self.book, dates)
# New citationality model
class DerridaWork(Notable):
    '''Models the reference copy used to identify all citations; not itself
    part of Derrida's library.'''
#: short title
short_title = models.CharField(max_length=255)
#: full citation
full_citation = models.TextField()
#: boolean indicator for primary work
is_primary = models.BooleanField()
#: slug for use in URLs
slug = models.SlugField(
help_text='slug for use in URLs (changing after creation will break URLs)')
#: zotero collection ID for use in populating library
zotero_id = models.CharField(max_length=8, default='', blank=True)
def __str__(self):
return self.short_title
class DerridaWorkSection(models.Model):
'''Sections of a :class:`DerridaWork` (e.g. chapters). Used to look at
:class:`Reference` by sections of the work.'''
name = models.CharField(max_length=255)
derridawork = models.ForeignKey(DerridaWork)
order = models.PositiveIntegerField('Order')
start_page = models.IntegerField(blank=True, null=True,
help_text='Sections with no pages will be treated as headers.')
end_page = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ['derridawork', 'order']
def __str__(self):
return self.name
class ReferenceType(Named, Notable):
    '''Type of reference, i.e. citation, quotation, footnote, epigraph.'''
class ReferenceQuerySet(models.QuerySet):
'''Custom :class:`~django.db.models.QuerySet` for :class:`Reference`.'''
def order_by_source_page(self):
        '''Order by page in Derrida work (:attr:`Reference.derridawork_page`)'''
return self.order_by('derridawork_page')
def order_by_author(self):
'''Order by author of cited work'''
return self.order_by('instance__work__authors__authorized_name')
def summary_values(self, include_author=False):
'''Return a values list of summary information for display or
visualization. Currently used for histogram visualization.
Author of cited work is aliased to `author`.
:param include_author: optionally include author information;
off by default, since this creates repeated records for
references to multi-author works
'''
extra_fields = {}
if include_author:
extra_fields['author'] = models.F('instance__work__authors__authorized_name')
return self.values(
'id', 'instance__slug', 'derridawork__slug',
'derridawork_page', 'derridawork_pageloc', **extra_fields)
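        # Example of a returned row (sketch; the field values are
        # hypothetical):
        # {'id': 1, 'instance__slug': 'husserl-lorigine-de-la-geometrie-1962',
        #  'derridawork__slug': 'de-la-grammatologie', 'derridawork_page': 65,
        #  'derridawork_pageloc': 'a', 'author': 'Husserl, Edmund'}
        # with the 'author' key present only when include_author=True.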
class Reference(models.Model):
'''Reference to a book from a work by Derrida. Can be a citation,
quotation, or other kind of reference.'''
#: :class:`Instance` that is referenced
instance = models.ForeignKey(Instance, blank=True, null=True)
#: :class:`DerridaWork` that references the item
derridawork = models.ForeignKey(DerridaWork)
#: page in the Derrida work.
derridawork_page = models.IntegerField()
#: location/identifier on the page
derridawork_pageloc = models.CharField(max_length=2)
#: page in the referenced item
book_page = models.CharField(max_length=255, blank=True)
#: :class:`ReferenceType`
reference_type = models.ForeignKey(ReferenceType)
#: anchor text
anchor_text = models.TextField(blank=True)
#: ManyToManyField to :class:`djiffy.models.Canvas`
canvases = models.ManyToManyField(Canvas, blank=True,
help_text="Scanned images from Derrida's Library | ")
#: ManyToManyField to :class:`derrida.interventions.Intervention`
interventions = models.ManyToManyField('interventions.Intervention',
blank=True) # Lazy reference to avoid a circular import
objects = ReferenceQuerySet.as_manager()
class Meta:
ordering = ['derridawork', 'derridawork_page', 'derridawork_pageloc']
def __str__(self):
return "%s, %s%s: %s, %s, %s" % (
self.derridawork.short_title,
self.derridawork_page,
self.derridawork_pageloc,
# instance is technically optional...
self.instance.display_title() if self.instance else '[no instance]',
self.book_page,
self.reference_type
)
def get_absolute_url(self):
'''URL for this reference on the site'''
# NOTE: currently view is html snippet for loading via ajax only
return reverse('books:reference', kwargs={
'derridawork_slug': self.derridawork.slug,
'page': self.derridawork_page,
'pageloc': self.derridawork_pageloc
})
def get_uri(self):
'''public URI for this instance to be used as an identifier'''
return absolutize_url(self.get_absolute_url())
def anchor_text_snippet(self):
'''Anchor text snippet, for admin display'''
snippet = self.anchor_text[:100]
if len(self.anchor_text) > 100:
return ''.join([snippet, ' ...'])
return snippet
anchor_text_snippet.short_description = 'Anchor Text'
    anchor_text_snippet.admin_order_field = 'anchor_text'
@property
def instance_slug(self):
'''Slug for the work instance used to display this reference.
For a reference to a book section, returns the slug
for the book that collects it.
'''
return self.book.slug
@property
def instance_url(self):
'''absolute url for the work instance where this reference
is displayed; uses :attr:`instance_slug`'''
return reverse('books:detail', args=[self.instance_slug])
@property
def book(self):
'''The "book" this reference is associated with; for a book section,
this is the work instance the section is collected in; for all other
cases, it is the work instance associated with this reference.
'''
return self.instance.collected_in or self.instance
@staticmethod
def instance_ids_with_digital_editions():
'''Used as a convenience method to provide a readonly field in the
admin change form for :class:`Reference` with a list of JSON formatted
primary keys. This is used by jQuery in the :class:`Reference`
        change_form and reference inlines on the :class:`Instance` change_form
to disable the autocomplete fields when there is or is not a digital
edition. See ``sitemedia/js/reference-instance-canvas-toggle.js`` for
this logic.
:rtype: JSON formatted string of :class:`Instance` primary keys
'''
with_digital_eds = Instance.objects.with_digital_eds()
# Flatten to just the primary keys
ids = with_digital_eds.values_list('id', flat=True).order_by('id')
# Return serialized JSON
return json.dumps(list(ids))
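        # Example return value (sketch): a JSON string such as '[1, 2, 5]',
        # listing primary keys of instances that have digital editions.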
def get_section(self):
'''Get the section name for a reference in grammatologie'''
# Hard coding because of the way DerridaWorkSection models are set up
# For convenience, assuming that we're only working with De la grammatologie
# Not making a property since that seems to mess with solr indexing
PART_2_CUTOFF = 140
return 'Part 1' if self.derridawork_page <= PART_2_CUTOFF else 'Part 2'
def get_chapter(self):
'''Get the chapter name for a reference in grammatologie'''
# For convenience, assuming that we're only working with De la grammatologie
# Not making a property since that seems to mess with solr indexing
# Some references have a page number before the first section (?)
for section in DerridaWorkSection.objects.all():
# Chapters have start and end pages, otherwise they're "Part 1" or "Part 2"
# and handled by the get_section method
if section.start_page and section.end_page:
if section.start_page <= self.derridawork_page <= section.end_page:
return section.name
| {
"content_hash": "87c0a029e0ae8540c1ee7508460a64ca",
"timestamp": "",
"source": "github",
"line_count": 862,
"max_line_length": 111,
"avg_line_length": 41.01276102088167,
"alnum_prop": 0.6411902808813962,
"repo_name": "Princeton-CDH/derrida-django",
"id": "ce4506576cf927b183af4e11dd1b2b024ea7ffcf",
"size": "35353",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "derrida/books/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24766"
},
{
"name": "HTML",
"bytes": "114756"
},
{
"name": "JavaScript",
"bytes": "81463"
},
{
"name": "Makefile",
"bytes": "913"
},
{
"name": "Python",
"bytes": "512326"
},
{
"name": "SCSS",
"bytes": "123546"
},
{
"name": "Shell",
"bytes": "979"
}
],
"symlink_target": ""
} |
import copy
import logging
import random
import time
from oslo_config import cfg
import six
from oslo_service._i18n import _, _LE, _LI
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
def list_opts():
"""Entry point for oslo-config-generator."""
return [(None, copy.deepcopy(periodic_opts))]
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on the default
interval of 60 seconds.
2. With arguments:
@periodic_task(spacing=N [, run_immediately=[True|False]]
       [, name=[None|"string"]])
this will be run on approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
task will be shortly after task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
starts. If name is not provided, __name__ of function is used.
"""
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
f._periodic_name = kwargs.pop('name', f.__name__)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parentheses.
#
# In the 'with-parenthesis' case (with kwargs present), this function needs
# to return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parenthesis' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
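# Usage sketch (not part of the original module; the example names below are
# hypothetical). Both invocation forms simply annotate the wrapped function:
@periodic_task
def _example_default_spacing(self, context):
    """Runs on the default 60-second interval."""
@periodic_task(spacing=10, run_immediately=True, name='example_task')
def _example_custom_spacing(self, context):
    """Runs roughly every 10 seconds, starting shortly after startup."""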
class _PeriodicTasksMeta(type):
def _add_periodic_task(cls, task):
"""Add a periodic task to the list of periodic tasks.
The task should already be decorated by @periodic_task.
:return: whether task was actually enabled
"""
name = task._periodic_name
if task._periodic_spacing < 0:
LOG.info(_LI('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
return False
if not task._periodic_enabled:
LOG.info(_LI('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
return False
# A periodic spacing of zero indicates that this task should
# be run on the default interval to avoid running too
# frequently.
if task._periodic_spacing == 0:
task._periodic_spacing = DEFAULT_INTERVAL
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
return True
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
        # class, so go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
cls._add_periodic_task(value)
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
    spacing, with the last run as an offset.
    E.g. if last run was 10 and spacing was 7, the new last run could be: 17, 24,
    31, 38...
    0% to 5% of the spacing value will be added to this value to ensure tasks
    do not synchronize. This jitter is rounded to the nearest second, which
    means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
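# Worked example (sketch): with last_run=10, spacing=7 and time.time()
# returning 40, delta=30 and offset=30 % 7=2, so the boundary is 40 - 2 = 38;
# the added jitter is int(7 * random.random() / 20) = 0 seconds here, which is
# why spacings under 20 seconds effectively get no jitter.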
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
def __init__(self):
super(PeriodicTasks, self).__init__()
self._periodic_last_run = {}
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
def add_periodic_task(self, task):
"""Add a periodic task to the list of periodic tasks.
The task should already be decorated by @periodic_task.
"""
if self.__class__._add_periodic_task(task):
self._periodic_last_run[task._periodic_name] = (
task._periodic_last_run)
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# Check if due, if not skip
idle_for = min(idle_for, spacing)
if last_run is not None:
delta = last_run + spacing - time.time()
if delta > 0:
idle_for = min(idle_for, delta)
continue
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[task_name] = _nearest_boundary(
last_run, spacing)
try:
task(self, context)
except Exception:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_name)s"),
{"full_task_name": full_task_name})
time.sleep(0)
return idle_for
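# Putting it together (sketch; ExampleManager and the calling loop are
# hypothetical, not part of the original module):
# class ExampleManager(PeriodicTasks):
#     @periodic_task(spacing=10, run_immediately=True)
#     def _report(self, context):
#         LOG.debug('periodic report')
#
# manager = ExampleManager()
# idle = manager.run_periodic_tasks(context=None)  # seconds until next task is due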
| {
"content_hash": "082c19088147e3eeef6e9e6c8788fa63",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 35.21917808219178,
"alnum_prop": 0.5960067418643848,
"repo_name": "eezhova/oslo.service",
"id": "d58a8c7440658e0d074c19fc629769d8ec0017b8",
"size": "8288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_service/periodic_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93111"
}
],
"symlink_target": ""
} |
from . import domainresource
class Specimen(domainresource.DomainResource):
""" Sample for analysis.
A sample to be used for analysis.
"""
resource_type = "Specimen"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.accessionIdentifier = None
""" Identifier assigned by the lab.
Type `Identifier` (represented as `dict` in JSON). """
self.collection = None
""" Collection details.
Type `SpecimenCollection` (represented as `dict` in JSON). """
self.condition = None
""" State of the specimen.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.container = None
""" Direct container of specimen (tube/slide, etc.).
List of `SpecimenContainer` items (represented as `dict` in JSON). """
self.identifier = None
""" External Identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.note = None
""" Comments.
List of `Annotation` items (represented as `dict` in JSON). """
self.parent = None
""" Specimen from which this specimen originated.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.processing = None
""" Processing and processing step details.
List of `SpecimenProcessing` items (represented as `dict` in JSON). """
self.receivedTime = None
""" The time when specimen was received for processing.
Type `FHIRDate` (represented as `str` in JSON). """
self.request = None
""" Why the specimen was collected.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.status = None
""" available | unavailable | unsatisfactory | entered-in-error.
Type `str`. """
self.subject = None
""" Where the specimen came from. This may be from patient(s), from a
location (e.g., the source of an environmental sample), or a
sampling of a substance or a device.
Type `FHIRReference` (represented as `dict` in JSON). """
self.type = None
""" Kind of material that forms the specimen.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(Specimen, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Specimen, self).elementProperties()
js.extend([
("accessionIdentifier", "accessionIdentifier", identifier.Identifier, False, None, False),
("collection", "collection", SpecimenCollection, False, None, False),
("condition", "condition", codeableconcept.CodeableConcept, True, None, False),
("container", "container", SpecimenContainer, True, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("note", "note", annotation.Annotation, True, None, False),
("parent", "parent", fhirreference.FHIRReference, True, None, False),
("processing", "processing", SpecimenProcessing, True, None, False),
("receivedTime", "receivedTime", fhirdate.FHIRDate, False, None, False),
("request", "request", fhirreference.FHIRReference, True, None, False),
("status", "status", str, False, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
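# Usage sketch (not part of the generated file; the JSON keys below simply
# mirror the properties defined above):
# specimen = Specimen({"resourceType": "Specimen", "status": "available"})
# specimen.status      # -> "available"
# specimen.as_json()   # round-trips back to a dict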
from . import backboneelement
class SpecimenCollection(backboneelement.BackboneElement):
""" Collection details.
Details concerning the specimen collection.
"""
resource_type = "SpecimenCollection"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.bodySite = None
""" Anatomical collection site.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.collectedDateTime = None
""" Collection time.
Type `FHIRDate` (represented as `str` in JSON). """
self.collectedPeriod = None
""" Collection time.
Type `Period` (represented as `dict` in JSON). """
self.collector = None
""" Who collected the specimen.
Type `FHIRReference` (represented as `dict` in JSON). """
self.duration = None
""" How long it took to collect specimen.
Type `Duration` (represented as `dict` in JSON). """
self.fastingStatusCodeableConcept = None
""" Whether or how long patient abstained from food and/or drink.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.fastingStatusDuration = None
""" Whether or how long patient abstained from food and/or drink.
Type `Duration` (represented as `dict` in JSON). """
self.method = None
""" Technique used to perform collection.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.quantity = None
""" The quantity of specimen collected.
Type `Quantity` (represented as `dict` in JSON). """
super(SpecimenCollection, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SpecimenCollection, self).elementProperties()
js.extend([
("bodySite", "bodySite", codeableconcept.CodeableConcept, False, None, False),
("collectedDateTime", "collectedDateTime", fhirdate.FHIRDate, False, "collected", False),
("collectedPeriod", "collectedPeriod", period.Period, False, "collected", False),
("collector", "collector", fhirreference.FHIRReference, False, None, False),
("duration", "duration", duration.Duration, False, None, False),
("fastingStatusCodeableConcept", "fastingStatusCodeableConcept", codeableconcept.CodeableConcept, False, "fastingStatus", False),
("fastingStatusDuration", "fastingStatusDuration", duration.Duration, False, "fastingStatus", False),
("method", "method", codeableconcept.CodeableConcept, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
])
return js
class SpecimenContainer(backboneelement.BackboneElement):
""" Direct container of specimen (tube/slide, etc.).
The container holding the specimen. The recursive nature of containers;
i.e. blood in tube in tray in rack is not addressed here.
"""
resource_type = "SpecimenContainer"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.additiveCodeableConcept = None
""" Additive associated with container.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.additiveReference = None
""" Additive associated with container.
Type `FHIRReference` (represented as `dict` in JSON). """
self.capacity = None
""" Container volume or size.
Type `Quantity` (represented as `dict` in JSON). """
self.description = None
""" Textual description of the container.
Type `str`. """
self.identifier = None
""" Id for the container.
List of `Identifier` items (represented as `dict` in JSON). """
self.specimenQuantity = None
""" Quantity of specimen within container.
Type `Quantity` (represented as `dict` in JSON). """
self.type = None
""" Kind of container directly associated with specimen.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(SpecimenContainer, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SpecimenContainer, self).elementProperties()
js.extend([
("additiveCodeableConcept", "additiveCodeableConcept", codeableconcept.CodeableConcept, False, "additive", False),
("additiveReference", "additiveReference", fhirreference.FHIRReference, False, "additive", False),
("capacity", "capacity", quantity.Quantity, False, None, False),
("description", "description", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("specimenQuantity", "specimenQuantity", quantity.Quantity, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class SpecimenProcessing(backboneelement.BackboneElement):
""" Processing and processing step details.
Details concerning processing and processing steps for the specimen.
"""
resource_type = "SpecimenProcessing"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.additive = None
""" Material used in the processing step.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.description = None
""" Textual description of procedure.
Type `str`. """
self.procedure = None
""" Indicates the treatment step applied to the specimen.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.timeDateTime = None
""" Date and time of specimen processing.
Type `FHIRDate` (represented as `str` in JSON). """
self.timePeriod = None
""" Date and time of specimen processing.
Type `Period` (represented as `dict` in JSON). """
super(SpecimenProcessing, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SpecimenProcessing, self).elementProperties()
js.extend([
("additive", "additive", fhirreference.FHIRReference, True, None, False),
("description", "description", str, False, None, False),
("procedure", "procedure", codeableconcept.CodeableConcept, False, None, False),
("timeDateTime", "timeDateTime", fhirdate.FHIRDate, False, "time", False),
("timePeriod", "timePeriod", period.Period, False, "time", False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| {
"content_hash": "cb5049ca9db296543c3f5900a22e7344",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 141,
"avg_line_length": 40.851612903225806,
"alnum_prop": 0.6174194567277321,
"repo_name": "all-of-us/raw-data-repository",
"id": "e1dd9b5ba47773b926671f652cef83e26432fcdd",
"size": "12846",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_4_0_0/models/specimen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
import ml_collections
class hyper:
pass
def D(**kwargs):
return ml_collections.ConfigDict(initial_dictionary=kwargs)
# added, edited
end_num_steps = 1 # eventual number of steps in the distilled sampler
start_num_steps = 1024 #512 #1024 #1024 #512 #1024 #NOTE: todo change to #1024 # number of steps in baseline sampler
distill_steps_per_iter = 50000 #50000 #10000 #10000 #1000 #50000
# NOTE although it's stage1 it's actually stage 2
teacher_ckpt_path = 'projects/diffusion/cifar_stage1_43492025/1/retained_checkpoints/checkpoint_620000'
train_batch = 128 #2048 # 256
lr = 1e-4 #3e-4 #1e-4
sampling_num_steps_train_start = 128
use_eval_ckpt_dir = True
# # # our two stage approach
# eval_ckpt_dir = 'projects/diffusion/cifar10_stage2_50k_ema_decay0_43877018/1/retained_checkpoints/'
# sampler='ddim'
# nvidia_2step approach
eval_ckpt_dir = 'projects/diffusion/cifar_stage2_twostep_nvidia_50k_43902200/1/retained_checkpoints/'
sampler = 'new_two_step_nvidia'
# previous runs with ema-decay 0.9999
# 'projects/diffusion/cifar_stage1_43555434/1/retained_checkpoints/'
use_retained_ckpt = True
w_sample_const = 1.0 #0.
waiting_for_new_ckpt = False
progressive_sampling_step = True
def get_config():
config = D(
launch=D(
sweep=hyper.product([
hyper.sweep('config.seed', [0]), #TODO [1, 2, 3] change to [0]
hyper.sweep('config.model.args.uncond_prob',
[0.]), # check NOTE not 0.1
hyper.sweep('config.eval.w_sample_const', [0., 0.3, 1., 2., 4.])
]),),
# added
distillation=D(
# teacher checkpoint is used for teacher and initial params of student
teacher_checkpoint_path=teacher_ckpt_path,
steps_per_iter=distill_steps_per_iter, # number of distillation training steps per halving of sampler steps
only_finetune_temb=False,
start_num_steps=start_num_steps,
end_num_steps=end_num_steps,
another_teacher_init=False, #True, # added
),
# added
seed=0,
progressive_distill=True, # a flag for stage 2 training
main_class='Model',
dataset=D(
name='CIFAR10',
args=D(
# image_size=64,
class_conditional=True,
randflip=True,
),
),
sampler=sampler, #'noisy',
use_eval_ckpt_dir=use_eval_ckpt_dir,
eval_ckpt_dir=eval_ckpt_dir,
waiting_for_new_ckpt=waiting_for_new_ckpt,
progressive_sampling_step=progressive_sampling_step,
model=D(
# architecture
name='w_unet3',
args=D(
ch=256,
emb_ch=1024, # default is ch * 4
ch_mult=[1, 1, 1],
num_res_blocks=3,
attn_resolutions=[8, 16],
num_heads=1,
# head_dim=64,
dropout=0., # NOTE changes 0.1,
logsnr_input_type='inv_cos',
w_input_type='inv_cos', # w embedding added
resblock_resample=True,
uncond_prob=0.1, #NOTE: default, but as sweep 0.,
# extra_class=True,
),
teacher_extra_class=True, #NOTE added
          mean_type='v', #'eps', #both might not work since the teacher model uses eps, 'both', #NOTE: need to implement 'eps',
teacher_mean_type='v', # added
logvar_type='fixed_large', #'fixed_medium:0.3', # TODO: check
          mean_loss_weight_type='snr_trunc', #NOTE:default 'snr_trunc', 'snr' performs worse #'constant', #NOTE changed default 'snr_trunc', #constant='mse', snr, snr_trunc
logvar_loss_type='none',
# logsnr schedule
train_num_steps=end_num_steps,
eval_sampling_num_steps=end_num_steps,
train_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_clip_denoised=True,
# added
eval_sampling_num_steps_train_start=sampling_num_steps_train_start, # NOTE: need to change
noisy_sampler_interpolation=0.2,
train_w_schedule=D(
name='uniform',
# logsnr_min=0., logsnr_max=0.5),
# logsnr_min=0., logsnr_max=1.0),
# logsnr_min=0., logsnr_max=2.0),
logsnr_min=0.,
logsnr_max=4.),
),
train=D(
# optimizer
batch_size=train_batch, #2048, # TODO: change back 2048,
optimizer='adam',
learning_rate=lr, # 1e-4 for 50k, 2e-4 for 10k #3e-4, #NOTE: todo #1e-4, #edited 3e-4,
learning_rate_warmup_steps=0, #edited 10000, # used to be 1k, but 10k helps with stability
learning_rate_anneal_type='linear', # TODO: checked
learning_rate_anneal_steps=distill_steps_per_iter, # TODO: checked
weight_decay=0.0,
ema_decay=0.9999, #0.,
grad_clip=1.0,
substeps=10,
enable_update_skip=False,
# logging
log_loss_every_steps=100,
checkpoint_every_secs=900, # 15 minutes
retain_checkpoint_every_steps=20000, # old checkpoints won't get deleted
eval_every_steps=20000,
w_conditoned_training=True, # added
          w_warmup_steps=0, # NOTE, set 0 10000, # added to warm up w embedding
),
eval=D(
batch_size=128, # TODO change to 128,
num_inception_samples=50000,
sampling_fixed_classes=[0, 5], # visualize malamute, siamese cat
sampling_fixed_w=[0.1, 0.3, 0.5], # NOTE, not used
use_retained_ckpt=use_retained_ckpt,
w_sample_const=w_sample_const,
),
)
# added
if hasattr(config, 'progressive_distill'):
# run some sanity checks on inputs
assert config.distillation.start_num_steps > 0
assert config.distillation.end_num_steps > 0
assert config.distillation.start_num_steps % config.distillation.end_num_steps == 0
return config
| {
"content_hash": "6f5ca0c281c7798d40f0743b021425d3",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 173,
"avg_line_length": 38.00625,
"alnum_prop": 0.5916790001644466,
"repo_name": "google-research/google-research",
"id": "9a95ea69769035a7dfc5f7c3510de0785537557e",
"size": "6859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddpm_w_distillation/ddpm_w_distillation/config/dir_sample_cifar_condw_stage2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI
from designate import coordination
from designate import service
from designate.central import rpcapi as central_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Service(coordination.CoordinationMixin, service.Service):
def __init__(self, threads=None):
super(Service, self).__init__(threads=threads)
self._partitioner = coordination.Partitioner(
self._coordinator, self.service_name, self._coordination_id,
range(0, 4095))
def _rebalance(self, my_partitions, members, event):
LOG.info(_LI("Received rebalance event %s") % event)
self.partition_range = my_partitions
def start(self):
super(Service, self).start()
self._partitioner.start()
self._partitioner.watch_partition_change(self._rebalance)
@property
def service_name(self):
return 'zone_manager'
@property
def central_api(self):
return central_api.CentralAPI.get_instance()
| {
"content_hash": "fd2d4d46070a4394f418abd6f9f057f1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 72,
"avg_line_length": 28.945945945945947,
"alnum_prop": 0.6834733893557423,
"repo_name": "kiall/designate-py3",
"id": "699af2fb06c1aaf567904ca7ecfc49c5b9416868",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/zone_manager/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9136"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1977010"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13056"
}
],
"symlink_target": ""
} |
"""
A dirty script to send PDFs to my personal Kindle.
Credentials (email password, email addresses, etc.) should be kept in a `.env` file.
"""
import argparse
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
from halo import Halo
from redmail import EmailSender
parser = argparse.ArgumentParser(description="Send a PDF to my Kindle.")
parser.add_argument("file", help="The file to be sent.")
parser.add_argument(
"-s", "--subject", default="Convert", help="The subject of the email.", type=str
)
parser.add_argument(
"-c", "--config", default=".env", help="The .env file to use.", required=False
)
args = parser.parse_args()
if os.path.isfile(args.config):
load_dotenv(args.config)
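# Example .env layout (sketch; the keys match the os.getenv() calls below,
# the values are placeholders):
#   EMAIL_ADDRESS=you@example.com
#   EMAIL_PASSWORD=app-specific-password
#   KINDLE_ADDRESS=your-name@kindle.com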
def build_email():
"""Constructs the email"""
email = EmailSender(
host="smtp.office365.com",
port=587,
username=os.getenv("EMAIL_ADDRESS"),
password=os.getenv("EMAIL_PASSWORD"),
)
filename = os.path.basename(args.file)
return email, filename
# Sending the email
@Halo(text="Sending email...", spinner="dots")
def send_email(email, filename):
"""Sends the constructed email."""
email.send(
sender=os.getenv("EMAIL_ADDRESS"),
receivers=[os.getenv("KINDLE_ADDRESS")],
subject=args.subject,
attachments={filename: Path(args.file).read_bytes()},
)
try:
email, filename = build_email()
send_email(email, filename)
print(f"::: Sent {filename} to {os.getenv('KINDLE_ADDRESS')}")
# pylint: disable=broad-except
except Exception as e:
print(e)
sys.exit(1)
| {
"content_hash": "bee299b2945229eb703429d4d6f7c7cb",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 26.18032786885246,
"alnum_prop": 0.667501565435191,
"repo_name": "bblinder/home-brews",
"id": "f8add56ae27cf43da6d3650af162febfbbe83570",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "send2kindle.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "392"
},
{
"name": "Python",
"bytes": "35874"
},
{
"name": "Shell",
"bytes": "44561"
}
],
"symlink_target": ""
} |
"""
Using the results of exercises 1.16 and 1.17, devise a procedure that generates an iterative process for multiplying two
integers in terms of adding, doubling, and halving and uses a logarithmic number of steps.
Note from the book:
This algorithm, which is sometimes known as the ``Russian peasant method'' of multiplication, is ancient. Examples of
its use are found in the Rhind Papyrus, one of the two oldest mathematical documents in existence, written about 1700
B.C. (and copied from an even older document) by an Egyptian scribe named A'h-mose.
"""
from operator import eq, add, sub
from Chapter1.exercise1_17 import double, fast_multiply, halve
from Chapter1.themes.exponentiation import is_even
def mult(a, b):
def mult_iter(first, counter, sum):
if eq(counter, 0):
return sum
if is_even(counter):
return mult_iter(
double(first),
halve(counter),
sum
)
return mult_iter(
first,
sub(counter, 1),
add(first, sum)
)
return mult_iter(a, b, 0)
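# Worked example of the iterative process (sketch): mult(5, 6) evolves the
# state (first, counter, sum) as
#   (5, 6, 0) -> (10, 3, 0) -> (10, 2, 10) -> (20, 1, 10) -> (20, 0, 30)
# doubling/halving on even counters and adding on odd ones, so mult(5, 6) == 30.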
def run_the_magic():
from timeit import Timer
b, n = 10, 1000
timer = Timer(stmt='mult(%(b)s, %(n)s)' % locals(), setup='from Chapter1.exercise1_18 import mult')
print('(* %(b)s %(n)s)' % locals(), mult(b, n), 'Time: %s' % (timer.timeit(),), sep='\n')
timer = Timer(stmt='fast_multiply(%(b)s, %(n)s)' % locals(), setup='from Chapter1.exercise1_18 import fast_multiply')
print('(fast-multiply %(b)s %(n)s)' % locals(), fast_multiply(b, n), 'Time: %s' % (timer.timeit(),), sep='\n')
if __name__ == '__main__':
run_the_magic()
| {
"content_hash": "7727a8aa1b46e9ee485c0ff081dfb532",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 121,
"avg_line_length": 36.391304347826086,
"alnum_prop": 0.6242532855436081,
"repo_name": "aoyono/sicpy",
"id": "6da799c151cae58a9be0006366b21afdf233cd6f",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter1/exercises/exercise1_18.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "229644"
}
],
"symlink_target": ""
} |
"""
Cards recognition software for game of Chinese Poker
The game is played by creating a stack of 5,5,3 cards.
Each hand must be worth less than the hand below it.
The application also compares two players' hands and calculates the points
"""
import sys
import numpy as np
import cv2 as cv
import scoreGame as scr
####################### MODULE VARIABLES ###################################
# Retrieve this module
this = sys.modules[__name__]
# Store module variables
this.path = "/Users/matteo/Desktop/ChinesePoker/"
####################### COMPUTER VISION ##################################
# Function identifies cards by comparing with templates
# Suffers from scaling problems !!!
def getCards_Template(img):
# Read in the template image
template = cv.imread(this.path + "test/template_spades.jpeg",0)
scale = 0.4
template = cv.resize(template,None,fx=scale,fy=scale,interpolation=cv.INTER_AREA)
w, h = template.shape[::-1]
# Perform template matching
res = cv.matchTemplate(img,template,cv.TM_CCOEFF_NORMED)
threshold = 0.7
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
# Show image
showImage(img)
# Function returns an array containing the card values and their "color"
def getCards_Threshold(img):
# Convert to gray image and blur
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
blur = cv.GaussianBlur(gray,(1,1),1000)
# Threshold the image to search for contours
# Use simple thresholding
#flag, thresh = cv.threshold(blur, 200, 255, cv.THRESH_BINARY)
#flag, thresh = cv.threshold(blur, 200, 255, cv.THRESH_BINARY)
# Use adaptive gaussian thresholding
thresh = cv.adaptiveThreshold(blur, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 121, 0)
# Find the contours, sort them by area and keep the largest 3
imgC, contours, hierarchy = cv.findContours(thresh,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv.contourArea, reverse=True)[:3]
# Iterate through the contours and separate the cards
# Draw the contours on the image
imgCD = cv.drawContours(img,contours,-1,(0,255,0),2)
showImage(imgCD)
# Show image and wait for key stroke to destroy window
def showImage(img):
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows()
if __name__ == '__main__':
# Specify filename, load image and show it
filename = this.path + "test/test_3.jpeg"
    imgOrig = cv.imread(filename, 0)
    # Scale image
    scale = 0.2
    img = cv.resize(imgOrig, None, fx=scale, fy=scale, interpolation=cv.INTER_AREA)
    # Run the template-matching detector on the grayscale image (note: the
    # original never invoked a detector here; template matching is one option)
    getCards_Template(img)
| {
"content_hash": "07cc92cc0164cf5a40a9d1b2b2075baf",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 101,
"avg_line_length": 33.67088607594937,
"alnum_prop": 0.6672932330827067,
"repo_name": "matteobe/ChinesePoker",
"id": "fee5ad5103d13d6ef0cb5251cd359aa0b7b2cfa0",
"size": "2660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cardsRecognition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13905"
}
],
"symlink_target": ""
} |
from model import Event
from google.appengine.ext import db
def post(handler, response):
key = handler.request.get('key')
response.event = event = Event.get(key) if key else Event()
event.name = handler.request.get('name')
event.link = handler.request.get('link')
event.city = handler.request.get('city')
event.date = handler.request.get('date')
event.slug = handler.request.get('slug')
lat = handler.request.get('lat')
lon = handler.request.get('lon')
if lat and lon:
event.location = db.GeoPt(lat, lon)
event.update_location()
event.put()
    response.redirect = event.permalink()
handler.redirect(event.permalink())
def get(handler, response):
key = handler.request.get('key')
response.event = Event.get(key) if key else Event()
| {
"content_hash": "60889d0a6e6b57e664eb8442189e7c19",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 61,
"avg_line_length": 33.30434782608695,
"alnum_prop": 0.7036553524804178,
"repo_name": "globalspin/haemapod",
"id": "7c8902928337f4679e0debc536be3b27c8f455be",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haemapod/handlers/events/add.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='data-sync-s3',
version='0.0.1',
description='Data Sync S3',
author='Peter Bull',
author_email='[email protected]',
url='http://www.drivendata.org/',
license='MIT',
packages=find_packages(),
entry_points={
'console_scripts': [
'data-sync-s3=data_sync:main',
]},
zip_safe=True,
) | {
"content_hash": "02502e99d0daaf79b931f135bf79b7f5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 43,
"avg_line_length": 20.63157894736842,
"alnum_prop": 0.6071428571428571,
"repo_name": "pjbull/data-sync",
"id": "cdb53fe9be6cb14f5d8d528c04974889092f6791",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4359"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pwd',
version='0.3.5',
description="pwd",
long_description=readme + '\n\n' + history,
author="Wes Turner",
author_email='[email protected]',
url='https://github.com/westurner/pwd',
packages=[
'pwd',
],
package_dir={'pwd':
'pwd'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='pwd',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| {
"content_hash": "7851aa39ef126e6782d85b43f6e59552",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 63,
"avg_line_length": 26.28301886792453,
"alnum_prop": 0.6001435750179469,
"repo_name": "westurner/pwd",
"id": "c39d895300739807182c1651ca42e60e3509a89b",
"size": "1441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "24221"
},
{
"name": "JavaScript",
"bytes": "26286"
},
{
"name": "Makefile",
"bytes": "2380"
},
{
"name": "Python",
"bytes": "1950"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import errno
import socket
import unittest
import gobject
from mock import Mock, patch, sentinel
from mopidy.utils import network
from tests import any_int
class ServerTest(unittest.TestCase):
def setUp(self):
self.mock = Mock(spec=network.Server)
def test_init_calls_create_server_socket(self):
network.Server.__init__(
self.mock, sentinel.host, sentinel.port, sentinel.protocol)
self.mock.create_server_socket.assert_called_once_with(
sentinel.host, sentinel.port)
def test_init_calls_register_server(self):
sock = Mock(spec=socket.SocketType)
sock.fileno.return_value = sentinel.fileno
self.mock.create_server_socket.return_value = sock
network.Server.__init__(
self.mock, sentinel.host, sentinel.port, sentinel.protocol)
self.mock.register_server_socket.assert_called_once_with(
sentinel.fileno)
def test_init_fails_on_fileno_call(self):
sock = Mock(spec=socket.SocketType)
sock.fileno.side_effect = socket.error
self.mock.create_server_socket.return_value = sock
with self.assertRaises(socket.error):
network.Server.__init__(
self.mock, sentinel.host, sentinel.port, sentinel.protocol)
def test_init_stores_values_in_attributes(self):
        # This needs to be a mock and not a sentinel as fileno() is called on it
sock = Mock(spec=socket.SocketType)
self.mock.create_server_socket.return_value = sock
network.Server.__init__(
self.mock, sentinel.host, sentinel.port, sentinel.protocol,
max_connections=sentinel.max_connections, timeout=sentinel.timeout)
self.assertEqual(sentinel.protocol, self.mock.protocol)
self.assertEqual(sentinel.max_connections, self.mock.max_connections)
self.assertEqual(sentinel.timeout, self.mock.timeout)
self.assertEqual(sock, self.mock.server_socket)
@patch.object(network, 'create_socket', spec=socket.SocketType)
def test_create_server_socket_sets_up_listener(self, create_socket):
sock = create_socket.return_value
network.Server.create_server_socket(
self.mock, sentinel.host, sentinel.port)
sock.setblocking.assert_called_once_with(False)
sock.bind.assert_called_once_with((sentinel.host, sentinel.port))
sock.listen.assert_called_once_with(any_int)
@patch.object(network, 'create_socket', new=Mock())
def test_create_server_socket_fails(self):
network.create_socket.side_effect = socket.error
with self.assertRaises(socket.error):
network.Server.create_server_socket(
self.mock, sentinel.host, sentinel.port)
@patch.object(network, 'create_socket', new=Mock())
def test_create_server_bind_fails(self):
sock = network.create_socket.return_value
sock.bind.side_effect = socket.error
with self.assertRaises(socket.error):
network.Server.create_server_socket(
self.mock, sentinel.host, sentinel.port)
@patch.object(network, 'create_socket', new=Mock())
def test_create_server_listen_fails(self):
sock = network.create_socket.return_value
sock.listen.side_effect = socket.error
with self.assertRaises(socket.error):
network.Server.create_server_socket(
self.mock, sentinel.host, sentinel.port)
@patch.object(gobject, 'io_add_watch', new=Mock())
def test_register_server_socket_sets_up_io_watch(self):
network.Server.register_server_socket(self.mock, sentinel.fileno)
gobject.io_add_watch.assert_called_once_with(
sentinel.fileno, gobject.IO_IN, self.mock.handle_connection)
def test_handle_connection(self):
self.mock.accept_connection.return_value = (
sentinel.sock, sentinel.addr)
self.mock.maximum_connections_exceeded.return_value = False
self.assertTrue(network.Server.handle_connection(
self.mock, sentinel.fileno, gobject.IO_IN))
self.mock.accept_connection.assert_called_once_with()
self.mock.maximum_connections_exceeded.assert_called_once_with()
self.mock.init_connection.assert_called_once_with(
sentinel.sock, sentinel.addr)
self.assertEqual(0, self.mock.reject_connection.call_count)
def test_handle_connection_exceeded_connections(self):
self.mock.accept_connection.return_value = (
sentinel.sock, sentinel.addr)
self.mock.maximum_connections_exceeded.return_value = True
self.assertTrue(network.Server.handle_connection(
self.mock, sentinel.fileno, gobject.IO_IN))
self.mock.accept_connection.assert_called_once_with()
self.mock.maximum_connections_exceeded.assert_called_once_with()
self.mock.reject_connection.assert_called_once_with(
sentinel.sock, sentinel.addr)
self.assertEqual(0, self.mock.init_connection.call_count)
def test_accept_connection(self):
sock = Mock(spec=socket.SocketType)
sock.accept.return_value = (sentinel.sock, sentinel.addr)
self.mock.server_socket = sock
sock, addr = network.Server.accept_connection(self.mock)
self.assertEqual(sentinel.sock, sock)
self.assertEqual(sentinel.addr, addr)
def test_accept_connection_recoverable_error(self):
sock = Mock(spec=socket.SocketType)
self.mock.server_socket = sock
for error in (errno.EAGAIN, errno.EINTR):
sock.accept.side_effect = socket.error(error, '')
with self.assertRaises(network.ShouldRetrySocketCall):
network.Server.accept_connection(self.mock)
    # FIXME decide if this should be allowed to propagate
def test_accept_connection_unrecoverable_error(self):
sock = Mock(spec=socket.SocketType)
self.mock.server_socket = sock
sock.accept.side_effect = socket.error
with self.assertRaises(socket.error):
network.Server.accept_connection(self.mock)
def test_maximum_connections_exceeded(self):
self.mock.max_connections = 10
self.mock.number_of_connections.return_value = 11
self.assertTrue(network.Server.maximum_connections_exceeded(self.mock))
self.mock.number_of_connections.return_value = 10
self.assertTrue(network.Server.maximum_connections_exceeded(self.mock))
self.mock.number_of_connections.return_value = 9
self.assertFalse(
network.Server.maximum_connections_exceeded(self.mock))
@patch('pykka.registry.ActorRegistry.get_by_class')
def test_number_of_connections(self, get_by_class):
self.mock.protocol = sentinel.protocol
get_by_class.return_value = [1, 2, 3]
self.assertEqual(3, network.Server.number_of_connections(self.mock))
get_by_class.return_value = []
self.assertEqual(0, network.Server.number_of_connections(self.mock))
@patch.object(network, 'Connection', new=Mock())
def test_init_connection(self):
self.mock.protocol = sentinel.protocol
self.mock.protocol_kwargs = {}
self.mock.timeout = sentinel.timeout
network.Server.init_connection(self.mock, sentinel.sock, sentinel.addr)
network.Connection.assert_called_once_with(
sentinel.protocol, {}, sentinel.sock, sentinel.addr,
sentinel.timeout)
def test_reject_connection(self):
sock = Mock(spec=socket.SocketType)
network.Server.reject_connection(
self.mock, sock, (sentinel.host, sentinel.port))
sock.close.assert_called_once_with()
def test_reject_connection_error(self):
sock = Mock(spec=socket.SocketType)
sock.close.side_effect = socket.error
network.Server.reject_connection(
self.mock, sock, (sentinel.host, sentinel.port))
sock.close.assert_called_once_with()
| {
"content_hash": "a71ff000ad214a89fb4768b037020a76",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 40.58080808080808,
"alnum_prop": 0.6747977598008712,
"repo_name": "woutervanwijk/mopidy",
"id": "eebc9ea2319f20ac5684e90984fb2c6bd0dfa3cb",
"size": "8035",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/utils/network/test_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "JavaScript",
"bytes": "74911"
},
{
"name": "Python",
"bytes": "925399"
}
],
"symlink_target": ""
} |
import math
import cPickle as pickle
class NaiveBayes():
def strip_punctuation(self, text):
return "".join(c for c in text if c not in ('!','.',':'))
def __init__(self):
self.democrats = pickle.load(open('democrats.pickle', 'rb'))
self.republicans = pickle.load(open('republicans.pickle', 'rb'))
def get_leaning(self, text):
words = [self.strip_punctuation(word) for word in text.split()]
dem_score = 0
rep_score = 0
for word in words:
if word in self.democrats:
dem_score += self.democrats[word]
if word in self.republicans:
rep_score += self.republicans[word]
diff = abs(dem_score - rep_score)
if diff > 80:
return 'Democrat'
else:
return 'Republican'
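# A minimal usage sketch (assumes democrats.pickle and republicans.pickle are
# present in the working directory, as loaded in __init__ above; the sample
# sentence is hypothetical):
#
#   nb = NaiveBayes()
#   print nb.get_leaning('We must expand access to affordable healthcare.')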
| {
"content_hash": "57c70368df23705759fef43a044c4401",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.6609929078014184,
"repo_name": "USA-Hacks/Politik-Back",
"id": "a1fee43cea7065f51e5ad250df242a4a325efe43",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/naive_bayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "COBOL",
"bytes": "10418760"
},
{
"name": "Python",
"bytes": "5977"
}
],
"symlink_target": ""
} |
from quickbooks.objects import Ref
from huxley.invoice_automation.src.util.query_utils import construct_invoice_query
class TestQueryUtils:
def test_construct_invoice_query_happyPath(self):
# Setup
customer_ref = Ref()
customer_ref.value = 1
# Act
query = construct_invoice_query(customer_ref)
# Verify
expected_query = 'SELECT * FROM Invoice WHERE CustomerRef=1 AND MetaData.CreateTime>=\'2022-08-01T00:00:00\''
assert query == expected_query
| {
"content_hash": "2259b09e94a9ce76c62be93f8ae8b586",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 117,
"avg_line_length": 30.41176470588235,
"alnum_prop": 0.6769825918762089,
"repo_name": "bmun/huxley",
"id": "4bfa29417700c3cef7a710963ede4afad40d3d36",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/invoice_automation/tst/util/test_query_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13301"
},
{
"name": "JavaScript",
"bytes": "400597"
},
{
"name": "Less",
"bytes": "19215"
},
{
"name": "Python",
"bytes": "635783"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
} |
from lxml import html
import requests
def get_values(response_dict):
'''Takes a dictionary created from the json given by evepraisal. Returns
a tuple of the form (total_blueloot_value, total_salvage_value).'''
#
# CONSTANTS:
#
ec_marketdata_url = 'http://api.eve-central.com/api/marketstat'
system = {
'Jita': 30000142,
'Amarr': 30002187,
'Dodixie': 30002659,
'Rens': 30002510,
'Hek': 30002053,
}
#
# DEAL WITH SLEEPER LOOT
#
# make a dictionary of only "Sleeper Components" (groupID 880)
blueloot = {item['typeID']: item['quantity']
for item in response_dict['items']
if item['groupID'] == 880}
# make blueloot URL parameter payload dictionary
blueloot_payload = {
'usesystem': system['Amarr'],
'typeid': [key for key in blueloot.keys()]
}
# make blueloot price request and create a document tree
blueloot_response = requests.get(ec_marketdata_url, blueloot_payload)
blueloot_tree = html.fromstring(blueloot_response.content)
# initialize blueloot total to zero
blueloot_value = 0
# for every typeid of blueloot
for (typeid, quantity) in blueloot.items():
# create path, use it to get typeid's price from the tree
# note! blueloot price is MAX AMARR BUY ORDER
path = '//*[@id="%d"]/buy/max/text()' % typeid
price = float(blueloot_tree.xpath(path)[0])
# calculate value of the stack of this typeid
typeid_stack_value = price * quantity
# update the total value
blueloot_value += typeid_stack_value
print 'total blueloot value', blueloot_value
#
# DEAL WITH SALVAGE
#
# make a dictionary of everything else (not groupID 880)
salvage = {item['typeID']: item['quantity']
for item in response_dict['items']
if item['groupID'] != 880}
# make salvage URL parameter payload dictionary
salvage_payload = {
'usesystem': system['Dodixie'],
'typeid': [key for key in salvage.keys()]
}
# make salvage price request and create a document tree
salvage_response = requests.get(ec_marketdata_url, salvage_payload)
salvage_tree = html.fromstring(salvage_response.content)
# initialize salvage total to zero
salvage_value = 0
# for every typeid of salvage
for (typeid, quantity) in salvage.items():
# create path, use it to get typeid's price from the tree note!
# salvage price is AVERAGE OF MAX DODIXIE BUY AND MIN DODIXIE SELL
# ORDERS
buy_path = '//*[@id="%d"]/buy/max/text()' % typeid
sell_path = '//*[@id="%d"]/sell/min/text()' % typeid
buy_price = float(salvage_tree.xpath(buy_path)[0])
sell_price = float(salvage_tree.xpath(sell_path)[0])
price = (buy_price + sell_price) / 2.0
# calculate value of the stack of this typeid
typeid_stack_value = price * quantity
# update the total value
salvage_value += typeid_stack_value
print 'total salvage value', salvage_value
return (blueloot_value, salvage_value)
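# Expected input shape (a sketch with placeholder values, not real data):
# get_values() only relies on the 'items' list, where each item carries the
# 'typeID', 'groupID' and 'quantity' keys used above; groupID 880 marks
# Sleeper Components ("blue loot"), anything else is priced as salvage.
#
#   response_dict = {
#       'items': [
#           {'typeID': 30747, 'groupID': 880, 'quantity': 3},
#           {'typeID': 25595, 'groupID': 123, 'quantity': 10},
#       ]
#   }
#   blueloot_value, salvage_value = get_values(response_dict)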
| {
"content_hash": "a683b6576eb6f05eafeba73afe7cc6df",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 77,
"avg_line_length": 30.87378640776699,
"alnum_prop": 0.6226415094339622,
"repo_name": "all-out/lootparser",
"id": "f06dfa2420a0efdf06cf7c18c28054858623cfc3",
"size": "3180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37"
},
{
"name": "HTML",
"bytes": "7246"
},
{
"name": "Python",
"bytes": "15628"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from .var_loader import VarLoader
class Config(object):
"""Container for the top-level test configuration.
This contains all of the top-level configuration, such as the target
host and variables to be used in test cases.
"""
def __init__(self, scheme, host, variables, var_loader, test_filename):
super(Config, self).__init__()
self.var_loader = var_loader
self.scheme = scheme
self.variables = variables
self.host = self.load_variable('host', host)
self.test_filename = test_filename
@classmethod
def from_dict(cls, config, test_filename):
var_loader = VarLoader(test_filename)
variables = var_loader.load_variables(config.get('vars', {}))
return cls(
scheme=config.get('scheme', 'http'),
host=config['host'],
variables=variables,
var_loader=var_loader,
test_filename=test_filename,
)
def load_variable(self, name, var):
return self.var_loader.load_variable(name, var, self.variables)
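# A minimal usage sketch (the host and filename below are hypothetical; the
# dictionary keys mirror the ones read in from_dict() above, and the host is
# resolved through the var loader):
#
#   config = Config.from_dict(
#       {'scheme': 'https', 'host': 'example.com', 'vars': {}},
#       test_filename='tests/example.yaml',
#   )
#   assert config.scheme == 'https'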
| {
"content_hash": "9546702786edc8ffd4601f0258799b1b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 32.31428571428572,
"alnum_prop": 0.6304155614500442,
"repo_name": "sjagoe/usagi",
"id": "e604327456824220843bd1a53fb94218e148b0ad",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usagi/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "166559"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.shortcuts import redirect
import logging, re
class NextURLMiddleware(object):
'''
   Middleware that intercepts "GET" requests whose URL matches one of the patterns in NEXT_URL_INTERCEPTION (default is ['^/?$', ]).
   When a pattern matches and a next-URL is stored in the session under NEXT_URL_PARAM, the request is redirected to that URL; otherwise the request is processed as usual.
settings.py
-----------
NEXT_URL_PARAM='next'
NEXT_URL_INTERCEPTION=['^/?$', ]
'''
def __init__(self, get_response):
self.logger=logging.getLogger('webframe.NextURLMiddleware')
self.get_response=get_response
self.NEXT_URL_PARAM=getattr(settings, 'NEXT_URL_PARAM', 'next')
self.NEXT_URL_INTERCEPTION=getattr(settings, 'NEXT_URL_INTERCEPTION', ['^/?$', ])
def __call__(self, req):
if req.method=='GET':
for pattern in self.NEXT_URL_INTERCEPTION:
# Find the match pattern
if re.match(pattern, req.get_full_path()):
next=req.session.get(self.NEXT_URL_PARAM, None)
if next:
del req.session[self.NEXT_URL_PARAM] #Remove and reset the session
self.logger.debug('Redirect to session[\'{0}\']: {1}'.format(self.NEXT_URL_PARAM, next))
return redirect(next)
#End-For
#End if req.method=='GET'
rep=self.get_response(req)
return rep
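# A sketch of how this middleware is typically wired up in settings.py (the
# dotted path below is a guess based on this module's location and may differ
# per project):
#
#   MIDDLEWARE = [
#       # ...
#       'webframe.NextURLMiddleware.NextURLMiddleware',
#   ]
#   NEXT_URL_PARAM = 'next'
#   NEXT_URL_INTERCEPTION = ['^/?$', ]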
| {
"content_hash": "f2603a2ba54926b72c91c67a0417f18c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 124,
"avg_line_length": 38.888888888888886,
"alnum_prop": 0.6221428571428571,
"repo_name": "kensonman/webframe",
"id": "340353b1414515f5b36ed59b43b8e7da34cb10e3",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NextURLMiddleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6263"
},
{
"name": "HTML",
"bytes": "58285"
},
{
"name": "JavaScript",
"bytes": "377759"
},
{
"name": "Python",
"bytes": "262504"
},
{
"name": "SCSS",
"bytes": "3786"
},
{
"name": "Vue",
"bytes": "13512"
}
],
"symlink_target": ""
} |
import logging
import re
import socket
import binascii
import struct
import xml.etree.ElementTree as ET
from time import sleep
from struct import pack
from types import MethodType
from threading import Thread, Event
from socket import AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from os.path import dirname
from avatar2.targets import TargetStates
l = logging.getLogger('avatar2.gdbplugin')
chksum = lambda x: sum(x) & 0xff
match_hex = lambda m, s: [int(x, 16) for x in re.match(m, s).groups()]
TIMEOUT_TIME = 1.0
class GDBRSPServer(Thread):
def __init__(self, avatar, target, port=3333, xml_file=None,
do_forwarding=False):
super().__init__()
self.daemon=True
self.sock = socket.socket(AF_INET, SOCK_STREAM)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.avatar = avatar
self.target = target
self.port = port
self.xml_file = xml_file
self.do_forwarding = do_forwarding
self._packetsize=0x47FF
self.running = False
self.bps = {}
self._do_shutdown = Event()
xml_regs = ET.parse(self.xml_file).getroot().find('feature')
self.registers = [reg.attrib for reg in xml_regs if reg.tag == 'reg']
assert(len(self.registers))
self.handlers = {
'q' : self.query,
'v' : self.multi_letter_cmd,
'H' : self.set_thread_op,
'?' : self.halt_reason,
'g' : self.read_registers,
'G' : self.reg_write,
'm' : self.mem_read,
'M' : self.mem_write,
'c' : self.cont,
            'C' : self.cont, # continue with signal, we don't care
            's' : self.step,
            'S' : self.step_signal,
'Z' : self.insert_breakpoint,
'z' : self.remove_breakpoint,
'D' : self.detach,
}
def shutdown(self):
self._do_shutdown.set()
sleep(TIMEOUT_TIME*2)
def run(self):
l.info(f'GDB server listening on port {self.port}, please connect')
self.sock.bind(('', self.port))
self.sock.settimeout(TIMEOUT_TIME)
self.sock.listen(1)
while not self._do_shutdown.isSet():
try:
self.conn, addr = self.sock.accept()
except socket.timeout:
continue
self.conn.settimeout(TIMEOUT_TIME)
l.info(f'Accepted connection from {addr}')
if not self.target.state & TargetStates.STOPPED:
self.target.stop()
while self.conn._closed is False:
packet = self.receive_packet()
if packet is None:
continue
l.debug(f'Received: {packet}')
self.send_raw(b'+') # send ACK
handler = self.handlers.get(chr(packet[0]),
self.not_implemented)
resp = handler(packet)
if resp is not None:
self.send_packet(resp)
self.sock.close()
### Handlers
def not_implemented(self, pkt):
l.critical(f'Received not implemented packet: {pkt}')
return b''
def query(self, pkt):
if pkt[1:].startswith(b'Supported') is True:
feat = [b'PacketSize=%x' % self._packetsize,
b'qXfer:features:read+'
]
return b';'.join(feat)
if pkt[1:].startswith(b'Attached') is True:
return b'1'
if pkt[1:].startswith(b'Xfer:features:read:target.xml') is True:
off, length = match_hex('qXfer:features:read:target.xml:(.*),(.*)',
pkt.decode())
with open(self.xml_file, 'rb') as f:
data = f.read()
resp_data = data[off:off+length]
if len(resp_data) < length:
prefix = b'l'
else:
prefix = b'm'
return prefix+resp_data
if pkt[1:].startswith(b'fThreadInfo') is True:
return b'm1'
if pkt[1:].startswith(b'sThreadInfo') is True:
return b'l'
if pkt[1:].startswith(b'Rcmd') is True: # Monitor commands
try:
cmd = re.match('qRcmd,(.*)',pkt.decode())[1]
cmd = binascii.a2b_hex(cmd)
                l.debug(f'Received cmd: {cmd}')
res = eval(cmd)
self.send_packet(b'O' \
+ binascii.b2a_hex(repr(res).encode()) \
+ b'0a')
return b'OK'
except Exception as e:
self.send_packet(b'O' + b'ERROR: '.hex().encode())
if hasattr(e, 'msg'):
self.send_packet(b'O' \
+ e.msg.encode().hex().encode() \
+ b'0a')
elif hasattr(e, 'args'):
self.send_packet(b'O' \
+ e.args[0].encode().hex().encode() \
+ b'0a')
return b'OK'
return b''
def multi_letter_cmd(self, pkt):
if pkt[1:].startswith(b'vMustReplyEmpty') is True:
return b''
return b''
def set_thread_op(self, pkt):
return b'OK' # we don't implement threads yet
def halt_reason(self, pkt):
return b'S00' # we don't specify the signal yet
def read_registers(self, pkt):
resp = ''
for reg in self.registers:
bitsize = int(reg['bitsize'])
assert( bitsize % 8 == 0)
r_len = int(bitsize / 8)
r_val = self.target.read_register(reg['name'])
#l.debug(f'{reg["name"]}, {r_val}, {r_len}')
resp += r_val.to_bytes(r_len, 'little').hex()
return resp.encode()
def reg_write(self, pkt):
idx = 1 # ignore the first char of pkt
for reg in self.registers:
bitsize = int(reg['bitsize'])
r_len = int(bitsize / 8)
r_val = pkt[idx: idx + r_len*2]
r_raw = bytes.fromhex(r_val.decode())
int_val = int.from_bytes(r_raw, byteorder='little')
self.target.write_register(reg['name'], int_val)
idx += r_len*2
return b'OK'
def mem_read(self, pkt):
try:
addr, n = match_hex('m(.*),(.*)', pkt.decode())
if self.do_forwarding is True:
mr = self.avatar.get_memory_range(addr)
if mr is not None and mr.forwarded is True:
val = mr.forwarded_to.read_memory(addr, n)
val = val.to_bytes(n, byteorder='little')
return binascii.b2a_hex(val)
val = self.target.read_memory(addr, n, raw=True).hex()
return val.encode()
except Exception as e:
l.warn(f'Error in mem_read: {e}')
return b'E00'
def mem_write(self, pkt):
try:
addr, n, val = match_hex('M(.*),(.*):(.*)', pkt.decode())
raw_val = val.to_bytes(n, byteorder='big') # wtf :/
if self.do_forwarding is True:
mr = self.avatar.get_memory_range(addr)
if mr is not None and mr.forwarded is True:
int_val = int.from_bytes(raw_val,byteorder='little')
mr.forwarded_to.write_memory(addr, n, int_val)
return b'OK'
self.target.write_memory(addr, n, raw_val, raw=True)
return b'OK'
except Exception as e:
l.warn(f'Error in mem_write: {e}')
return b'E00'
def cont(self, pkt):
self.target.cont()
self.running = True
return b'OK'
def step(self, pkt):
self.target.step()
return b'S00'
def step_signal(self, pkt):
self.target.step()
return pkt[1:]
def insert_breakpoint(self, pkt):
addr, kind = match_hex('Z0,(.*),(.*)', pkt.decode())
bpno = self.target.set_breakpoint(addr)
self.bps[bpno] = addr
return b'OK'
def remove_breakpoint(self, pkt):
addr, kind = match_hex('z0,(.*),(.*)', pkt.decode())
matches = []
for n, a in self.bps.items():
if a == addr:
matches.append(n)
if len(matches) == 0:
l.warn(f'GDB tried to remove non existing bp for {addr}')
l.info(self.bps)
return b'E00'
        self.target.remove_breakpoint(matches[0])
        self.bps.pop(matches[0])
return b'OK'
def detach(self, pkt):
l.info("Exiting GDB server")
if not self.target.state & TargetStates.EXITED:
            for bpno in list(self.bps):
                self.target.remove_breakpoint(bpno)
self.target.cont()
if self.conn._closed is False:
self.send_packet(b'OK')
self.conn.close()
return None
### Sending and receiving
def send_packet(self, pkt):
if type(pkt) == str:
raise Exception("Packet require bytes, not strings")
self.send_raw(b'$%b#%02x' % (pkt, chksum(pkt)))
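    # Wire-format note (standard GDB remote serial protocol framing): a packet
    # is sent as  $<payload>#<checksum>  where <checksum> is the modulo-256 sum
    # of the payload bytes, printed as two lowercase hex digits. For example,
    # the payload b'OK' goes out as  $OK#9a  and is acknowledged with '+'.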
def send_raw(self, raw_bytes):
l.debug(f'Sending data: {raw_bytes}')
self.conn.send(raw_bytes)
def check_breakpoint_hit(self):
if self.target.state & TargetStates.STOPPED and self.running is True:
if self.target.regs.pc in self.bps.values():
self.running = False
self.send_packet(b'S05')
def receive_packet(self):
pkt_finished = False
pkt_receiving = False
while pkt_finished is False:
try:
c = self.conn.recv(1)
except socket.timeout:
if self._do_shutdown.isSet():
self.send_packet(b'S03')
self.conn.close()
return
if self.target.state & TargetStates.EXITED:
self.send_packet(b'S03')
self.conn.close()
return
self.check_breakpoint_hit()
continue
if c == b'\x03':
if not self.target.state & TargetStates.STOPPED:
self.target.stop()
self.send_packet(b'S02')
elif c == b'$': # start of package
pkt = b''
pkt_receiving = True
elif c == b'#': # end of package
checksum = self.conn.recv(2)
if int(checksum, 16) == chksum(pkt):
return pkt
else:
raise Exception('Checksum Error')
elif pkt_receiving == True:
pkt += c
def spawn_gdb_server(self, target, port, do_forwarding=True, xml_file=None):
if xml_file is None:
# default for now: use ARM
xml_file = f'{dirname(__file__)}/gdb/arm-target.xml'
server = GDBRSPServer(self, target, port, xml_file, do_forwarding)
server.start()
self._gdb_servers.append(server)
return server
def exit_server(avatar, watched_target):
for s in avatar._gdb_servers:
if s.target == watched_target:
s.shutdown()
avatar._gdb_servers.remove(s)
def load_plugin(avatar):
avatar.spawn_gdb_server = MethodType(spawn_gdb_server, avatar)
avatar.watchmen.add_watchman('TargetShutdown', when='before',
callback=exit_server)
avatar._gdb_servers = []
| {
"content_hash": "7984613178c4c1d2c531be5db2a17678",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 79,
"avg_line_length": 31.768194070080863,
"alnum_prop": 0.4976243000169693,
"repo_name": "avatartwo/avatar2",
"id": "19ad3834784658525817d21bcfce76ae773ccd20",
"size": "11802",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "avatar2/plugins/gdbserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "368478"
},
{
"name": "Shell",
"bytes": "2439"
}
],
"symlink_target": ""
} |
"""
A custom manager for working with trees of objects.
"""
import contextlib
from django.db import models, transaction, connections, router
from django.db.models import F, Max
from django.utils.translation import ugettext as _
from mptt.exceptions import CantDisableUpdates, InvalidMove
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager):
"""
A manager for working with trees of objects.
"""
def init_from_model(self, model):
"""
Sets things up. This would normally be done in contribute_to_class(),
but Django calls that before we've created our extra tree fields on the
model (which we need). So it's done here instead, after field setup.
"""
# Avoid calling "get_field_by_name()", which populates the related
# models cache and can cause circular imports in complex projects.
# Instead, find the tree_id field using "get_fields_with_model()".
[tree_field] = [fld for fld in model._meta.get_fields_with_model() if fld[0].name == self.tree_id_attr]
if tree_field[1]:
# tree_model is the model that contains the tree fields.
# this is usually just the same as model, but not for derived models.
self.tree_model = tree_field[1]
else:
self.tree_model = model
self._base_manager = None
if self.tree_model is not model:
# _base_manager is the treemanager on tree_model
self._base_manager = self.tree_model._tree_manager
@contextlib.contextmanager
def disable_mptt_updates(self):
"""
Context manager. Disables mptt updates.
NOTE that this context manager causes inconsistencies! MPTT model methods are
not guaranteed to return the correct results.
When to use this method:
If used correctly, this method can be used to speed up bulk updates.
This doesn't do anything clever. It *will* mess up your tree.
You should follow this method with a call to TreeManager.rebuild() to ensure your
tree stays sane, and you should wrap both calls in a transaction.
This is best for updates that span a large part of the table.
If you are doing localised changes (1 tree, or a few trees) consider
using delay_mptt_updates.
If you are making only minor changes to your tree, just let the updates happen.
Transactions:
This doesn't enforce any transactional behavior.
You should wrap this in a transaction to ensure database consistency.
If updates are already disabled on the model, this is a noop.
Usage::
with transaction.commit_on_success():
with MyNode.objects.disable_mptt_updates():
## bulk updates.
MyNode.objects.rebuild()
"""
# Error cases:
if self.model._meta.abstract:
# * an abstract model. Design decision needed - do we disable updates for
# all concrete models that derive from this model?
# I vote no - that's a bit implicit and it's a weird use-case anyway.
# Open to further discussion :)
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's an abstract model" % self.model.__name__
)
elif self.model._meta.proxy:
# * a proxy model. disabling updates would implicitly affect other models
# using the db table. Caller should call this on the manager for the concrete
# model instead, to make the behavior explicit.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's a proxy model. Call the concrete model instead."
% self.model.__name__
)
elif self.tree_model is not self.model:
# * a multiple-inheritance child of an MPTTModel.
# Disabling updates may affect instances of other models in the tree.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it doesn't contain the mptt fields."
% self.model.__name__
)
if not self.model._mptt_updates_enabled:
# already disabled, noop.
yield
else:
self.model._set_mptt_updates_enabled(False)
try:
yield
finally:
self.model._set_mptt_updates_enabled(True)
@contextlib.contextmanager
def delay_mptt_updates(self):
"""
Context manager. Delays mptt updates until the end of a block of bulk processing.
NOTE that this context manager causes inconsistencies! MPTT model methods are
not guaranteed to return the correct results until the end of the context block.
When to use this method:
If used correctly, this method can be used to speed up bulk updates.
This is best for updates in a localised area of the db table, especially if all
the updates happen in a single tree and the rest of the forest is left untouched.
No subsequent rebuild is necessary.
delay_mptt_updates does a partial rebuild of the modified trees (not the whole table).
If used indiscriminately, this can actually be much slower than just letting the updates
occur when they're required.
The worst case occurs when every tree in the table is modified just once.
That results in a full rebuild of the table, which can be *very* slow.
If your updates will modify most of the trees in the table (not a small number of trees),
        you should consider using TreeManager.disable_mptt_updates, as it does far fewer
queries.
Transactions:
This doesn't enforce any transactional behavior.
You should wrap this in a transaction to ensure database consistency.
Exceptions:
If an exception occurs before the processing of the block, delayed updates
will not be applied.
Usage::
with transaction.commit_on_success():
with MyNode.objects.delay_mptt_updates():
## bulk updates.
"""
with self.disable_mptt_updates():
if self.model._mptt_is_tracking:
# already tracking, noop.
yield
else:
self.model._mptt_start_tracking()
try:
yield
except Exception:
# stop tracking, but discard results
self.model._mptt_stop_tracking()
raise
results = self.model._mptt_stop_tracking()
partial_rebuild = self.partial_rebuild
for tree_id in results:
partial_rebuild(tree_id)
@property
def parent_attr(self):
return self.model._mptt_meta.parent_attr
@property
def left_attr(self):
return self.model._mptt_meta.left_attr
@property
def right_attr(self):
return self.model._mptt_meta.right_attr
@property
def tree_id_attr(self):
return self.model._mptt_meta.tree_id_attr
@property
def level_attr(self):
return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in list(lookups.items()):
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
def _mptt_filter(self, qs=None, **filters):
"""
Like self.filter(), but translates name-agnostic filters for MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self.get_query_set()
return qs.filter(**self._translate_lookups(**filters))
def _mptt_update(self, qs=None, **items):
"""
Like self.update(), but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self.get_query_set()
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, **hints):
return connections[router.db_for_write(self.model, **hints)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this `Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
connection = self._get_connection()
qn = connection.ops.quote_name
meta = self.model._meta
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
return queryset.extra(select={count_attr: subquery})
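    # A usage sketch (the Category/Article model names are hypothetical):
    # annotate each Category with the number of related Article rows, counting
    # articles attached to the category and all of its descendants:
    #
    #   categories = Category.objects.add_related_count(
    #       Category.objects.all(), Article, 'category', 'article_count',
    #       cumulative=True)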
def get_query_set(self):
"""
Returns a ``QuerySet`` which contains all tree items, ordered in
        such a way that root nodes appear in tree id order and
their subtrees appear in depth-first order.
"""
return super(TreeManager, self).get_query_set().order_by(
self.tree_id_attr, self.left_attr)
def insert_node(self, node, target, position='last-child', save=False, allow_existing_pk=False):
"""
Sets up the tree state for ``node`` (which has not yet been
        inserted into the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) once it is inserted, with any necessary space
        already having been made for it.
A ``target`` of ``None`` indicates that ``node`` should be
the last root node.
If ``save`` is ``True``, ``node``'s ``save()`` method will be
called before it is returned.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just set the node's parent and let mptt call this during save.
"""
if self._base_manager:
return self._base_manager.insert_node(node, target, position=position, save=save)
if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
raise ValueError(_('Cannot insert a node which has already been saved.'))
if target is None:
tree_id = self._get_next_tree_id()
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
elif target.is_root_node() and position in ['left', 'right']:
target_tree_id = getattr(target, self.tree_id_attr)
if position == 'left':
tree_id = target_tree_id
space_target = target_tree_id - 1
else:
tree_id = target_tree_id + 1
space_target = target_tree_id
self._create_tree_space(space_target)
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
else:
setattr(node, self.left_attr, 0)
setattr(node, self.level_attr, 0)
space_target, level, left, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
tree_id = getattr(parent, self.tree_id_attr)
self._create_space(2, space_target, tree_id)
setattr(node, self.left_attr, -left)
setattr(node, self.right_attr, -left + 1)
setattr(node, self.level_attr, -level)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, parent)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift)
if save:
node.save()
return node
def _move_node(self, node, target, position='last-child', save=True):
if self._base_manager:
return self._base_manager.move_node(node, target, position=position)
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save, allow_existing_pk=True)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
transaction.commit_unless_managed()
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just move the node yourself by setting node.parent.
"""
self._move_node(node, target, position=position)
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent=None).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent=None)
def rebuild(self):
"""
Rebuilds whole tree in database using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
def partial_rebuild(self, tree_id):
if self._base_manager:
return self._base_manager.partial_rebuild(tree_id)
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError("More than one root node with tree_id %d. That's invalid, do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
attr = '_%s_cache' % self.parent_attr
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
qs = self.get_query_set()
max_tree_id = list(qs.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
def _inter_tree_move_and_close_gap(self, node, level_change,
left_right_change, new_tree_id, parent_pk=None):
"""
Removes ``node`` from its current tree, with the given set of
changes being applied to ``node`` and its descendants, closing
the gap left by moving ``node`` as it does so.
If ``parent_pk`` is ``None``, this indicates that ``node`` is
being moved to a brand new tree as its root node, and will thus
have its parent field set to ``NULL``. Otherwise, ``node`` will
have ``parent_pk`` set for its parent field.
"""
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
inter_tree_move_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(tree_id)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %%s
ELSE %(tree_id)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s - %%s
WHEN %(left)s > %%s
THEN %(left)s - %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s - %%s
WHEN %(right)s > %%s
THEN %(right)s - %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %(new_parent)s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'new_parent': parent_pk is None and 'NULL' or '%s',
}
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
gap_size = right - left + 1
gap_target_left = left - 1
params = [
left, right, level_change,
left, right, new_tree_id,
left, right, left_right_change,
gap_target_left, gap_size,
left, right, left_right_change,
gap_target_left, gap_size,
node.pk,
getattr(node, self.tree_id_attr)
]
if parent_pk is not None:
params.insert(-1, parent_pk)
cursor = connection.cursor()
cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, None)
node._mptt_cached_fields[self.parent_attr] = None
def _make_sibling_of_root_node(self, node, target, position):
"""
Moves ``node``, making it a sibling of the given ``target`` root
node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
Since we use tree ids to reduce the number of rows affected by
        tree management during insertion and deletion, root nodes are not
true siblings; thus, making an item a sibling of a root node is
a special case which involves shuffling tree ids around.
"""
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
opts = self.model._meta
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if node.is_child_node():
if position == 'left':
space_target = target_tree_id - 1
new_tree_id = target_tree_id
elif position == 'right':
space_target = target_tree_id
new_tree_id = target_tree_id + 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
self._create_tree_space(space_target)
if tree_id > space_target:
# The node's tree id has been incremented in the
# database - this change must be reflected in the node
# object for the method call below to operate on the
# correct tree.
setattr(node, self.tree_id_attr, tree_id + 1)
self._make_child_root_node(node, new_tree_id)
else:
if position == 'left':
if target_tree_id > tree_id:
left_sibling = target.get_previous_sibling()
if node == left_sibling:
return
new_tree_id = getattr(left_sibling, self.tree_id_attr)
lower_bound, upper_bound = tree_id, new_tree_id
shift = -1
else:
new_tree_id = target_tree_id
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
elif position == 'right':
if target_tree_id > tree_id:
new_tree_id = target_tree_id
lower_bound, upper_bound = tree_id, target_tree_id
shift = -1
else:
right_sibling = target.get_next_sibling()
if node == right_sibling:
return
new_tree_id = getattr(right_sibling, self.tree_id_attr)
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
root_sibling_query = """
UPDATE %(table)s
SET %(tree_id)s = CASE
WHEN %(tree_id)s = %%s
THEN %%s
ELSE %(tree_id)s + %%s END
WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
lower_bound, upper_bound])
setattr(node, self.tree_id_attr, new_tree_id)
def _manage_space(self, size, target, tree_id):
"""
Manages spaces in the tree identified by ``tree_id`` by changing
the values of the left and right columns by ``size`` after the
given ``target`` point.
"""
if self.tree_model._mptt_is_tracking:
self.tree_model._mptt_track_tree_modified(tree_id)
else:
connection = self._get_connection()
qn = connection.ops.quote_name
opts = self.model._meta
space_query = """
UPDATE %(table)s
SET %(left)s = CASE
WHEN %(left)s > %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s > %%s
THEN %(right)s + %%s
ELSE %(right)s END
WHERE %(tree_id)s = %%s
AND (%(left)s > %%s OR %(right)s > %%s)""" % {
'table': qn(self.tree_model._meta.db_table),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(space_query, [target, size, target, size, tree_id,
target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
new_tree_id = getattr(target, self.tree_id_attr)
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(node, level_change,
left_right_change, new_tree_id, parent.pk)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
width = right - left + 1
tree_id = getattr(node, self.tree_id_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
if position == 'left':
if target_left > left:
new_left = target_left - width
new_right = target_left - 1
else:
new_left = target_left
new_right = target_left + width - 1
else:
if target_right > right:
new_left = target_right - width + 1
new_right = target_right
else:
new_left = target_right + 1
new_right = target_right + width
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_boundary = min(left, new_left)
right_boundary = max(right, new_right)
left_right_change = new_left - left
gap_size = width
if left_right_change > 0:
gap_size = -gap_size
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
# The level update must come before the left update to keep
# MySQL happy - left seems to refer to the updated value
# immediately after its update has been specified in the query
# with MySQL, but not with SQLite or Postgres.
move_subtree_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(move_subtree_query, [
left, right, level_change,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
node.pk, parent.pk,
tree_id])
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, new_left)
setattr(node, self.right_attr, new_right)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_root_node(self, node, target, position):
"""
        Moves root node ``node`` to a different tree, inserting it
relative to the given ``target`` node as specified by
``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
tree_id = getattr(node, self.tree_id_attr)
new_tree_id = getattr(target, self.tree_id_attr)
width = right - left + 1
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif tree_id == new_tree_id:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
space_target, level_change, left_right_change, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
# Create space for the tree which will be inserted
self._create_space(width, space_target, new_tree_id)
# Move the root node, making it a child node
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
move_tree_query = """
UPDATE %(table)s
SET %(level)s = %(level)s - %%s,
%(left)s = %(left)s - %%s,
%(right)s = %(right)s - %%s,
%(tree_id)s = %%s,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(left)s >= %%s AND %(left)s <= %%s
AND %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
}
cursor = connection.cursor()
cursor.execute(move_tree_query, [level_change, left_right_change,
left_right_change, new_tree_id, node.pk, parent.pk, left, right,
tree_id])
# Update the former root node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
| {
"content_hash": "1dd6fe28cfa65d4bbca60f5056dd065d",
"timestamp": "",
"source": "github",
"line_count": 1039,
"max_line_length": 119,
"avg_line_length": 40.07603464870068,
"alnum_prop": 0.5525829150555969,
"repo_name": "denys-duchier/django-mptt-py3",
"id": "5486e6bbecf15dab404320fe0844d40613c06d30",
"size": "41639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mptt/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171397"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""Unit tests for Motion JPEG encoder module."""
import encoder
import optimizer
import unittest
import test_tools
import mjpeg
class TestMotionJpegCodec(test_tools.FileUsingCodecTest):
def test_OneBlackFrame(self):
codec = mjpeg.MotionJpegCodec()
my_optimizer = optimizer.Optimizer(codec)
videofile = test_tools.MakeYuvFileWithOneBlankFrame(
'one_black_frame_1024_768_30.yuv')
# Motion JPEG generates a massive file, so give it a large target bitrate.
encoding = my_optimizer.BestEncoding(5000, videofile)
encoding.Execute()
self.assertLess(50.0, my_optimizer.Score(encoding))
def test_ParametersSet(self):
codec = mjpeg.MotionJpegCodec()
my_optimizer = optimizer.Optimizer(codec)
videofile = test_tools.MakeYuvFileWithOneBlankFrame(
'one_black_frame_1024_768_30.yuv')
my_encoder = encoder.Encoder(my_optimizer.context,
encoder.OptionValueSet(codec.option_set, '-qmin 1 -qmax 2',
formatter=codec.option_formatter))
encoding = my_encoder.Encoding(5000, videofile)
encoding.Execute()
self.assertLess(50.0, my_optimizer.Score(encoding))
def test_ParametersAdjusted(self):
codec = mjpeg.MotionJpegCodec()
my_optimizer = optimizer.Optimizer(codec)
my_encoder = encoder.Encoder(my_optimizer.context,
encoder.OptionValueSet(codec.option_set, '-qmin 2 -qmax 2',
formatter=codec.option_formatter))
self.assertEquals('2', my_encoder.parameters.GetValue('qmin'))
self.assertEquals('2', my_encoder.parameters.GetValue('qmax'))
# qmax is less than qmin. Should be adjusted to be above.
my_encoder = encoder.Encoder(my_optimizer.context,
encoder.OptionValueSet(codec.option_set, '-qmin 3 -qmax 2',
formatter=codec.option_formatter))
self.assertEquals('3', my_encoder.parameters.GetValue('qmin'))
self.assertEquals('3', my_encoder.parameters.GetValue('qmax'))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "8b99e45f8aeb9a01bfa5e923bcaac001",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 39.76470588235294,
"alnum_prop": 0.6923076923076923,
"repo_name": "google/compare-codecs",
"id": "e039ab232aaae8b590148e9a9d27a6f4df438f7b",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/mjpeg_unittest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3991"
},
{
"name": "CSS",
"bytes": "8583"
},
{
"name": "HTML",
"bytes": "17382"
},
{
"name": "JavaScript",
"bytes": "10932"
},
{
"name": "Python",
"bytes": "335186"
},
{
"name": "Shell",
"bytes": "22365"
}
],
"symlink_target": ""
} |
"""Tests for research.carls.models.caml.sparse_features lib."""
from absl.testing import parameterized
from research.carls.models.caml import sparse_features
from research.carls.testing import test_util
import numpy as np
import tensorflow as tf
class FeatureEmbeddingTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(FeatureEmbeddingTest, self).setUp()
self._config = test_util.default_de_config(2)
self._service_server = test_util.start_kbs_server()
self._kbs_address = 'localhost:%d' % self._service_server.port()
def tearDown(self):
self._service_server.Terminate()
super(FeatureEmbeddingTest, self).tearDown()
@parameterized.named_parameters(('with_sigma', 5), ('without_sigma', 0))
def test_partitioned_dynamic_embedding_lookup_1D_input(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
embed, sigma = sparse_features._partitioned_dynamic_embedding_lookup(
['input1', 'input2'],
config,
5,
sigma_dimension,
'feature_name0_%d' % sigma_dimension,
service_address=self._kbs_address)
if sigma_dimension > 0:
self.assertEqual((2, sigma_dimension), sigma.shape)
self.assertEqual((2, 5), embed.shape)
else:
self.assertAllClose([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], embed.numpy())
self.assertIsNone(sigma)
@parameterized.named_parameters(('with_sigma', 3), ('without_sigma', 0))
def test_partitioned_dynamic_embedding_lookup_2D_input(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
emb, sigma = sparse_features._partitioned_dynamic_embedding_lookup(
[['input1', ''], ['input2', 'input3']],
config,
5,
sigma_dimension,
'feature_name1_%d' % sigma_dimension,
service_address=self._kbs_address)
if sigma_dimension > 0:
self.assertEqual((2, 2, sigma_dimension), sigma.shape)
self.assertEqual((2, 2, 5), emb.shape)
self.assertAllClose([[[1] * sigma_dimension, [0] * sigma_dimension],
[[1] * sigma_dimension, [1] * sigma_dimension]],
sigma.numpy())
self.assertAllClose([[[1] * 5, [0] * 5], [[1] * 5, [1] * 5]], emb.numpy())
else:
self.assertAllClose([[[1] * 5, [0] * 5], [[1] * 5, [1] * 5]], emb.numpy())
self.assertIsNone(sigma)
@parameterized.named_parameters(('with_sigma', 3), ('without_sigma', 0))
def test_embed_single_feature_1D_input(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
emb, vc, sigma, input_embed, variables = sparse_features.embed_single_feature(
['input1', 'input2'],
config,
5,
sigma_dimension,
'feature_name2_%d' % sigma_dimension,
service_address=self._kbs_address)
if sigma_dimension > 0:
self.assertIsNotNone(variables)
self.assertEqual((2, 5), emb.shape)
self.assertEqual(5, vc.shape)
self.assertEqual((2, 1), sigma.shape)
self.assertEqual((2, 5), input_embed.shape)
else:
self.assertAllClose([[1] * 5, [1] * 5], emb.numpy())
self.assertIsNone(vc)
self.assertIsNone(sigma)
self.assertAllClose([[1] * 5, [1] * 5], input_embed)
# Lookup again with given variables. Checks all values are the same.
new_emb, new_vc, new_sigma, new_input_embed, variables = (
sparse_features.embed_single_feature(['input1', 'input2'],
config,
5,
sigma_dimension,
'feature_name2_%d' %
sigma_dimension,
variables=variables,
service_address=self._kbs_address))
if sigma_dimension > 0:
self.assertIsNotNone(variables)
self.assertAllClose(emb.numpy(), new_emb.numpy())
if vc is not None:
self.assertAllClose(vc.numpy(), new_vc.numpy())
if sigma is not None:
self.assertAllClose(sigma.numpy(), new_sigma.numpy())
self.assertAllClose(input_embed.numpy(), new_input_embed.numpy())
@parameterized.named_parameters(('with_sigma', 3), ('without_sigma', 0))
def test_embed_single_feature_2D_input(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
emb, vc, sigma, input_embed, var = sparse_features.embed_single_feature(
[['input1', ''], ['input2', 'input3']],
config,
5,
sigma_dimension,
'feature_name3_%d' % sigma_dimension,
service_address=self._kbs_address)
if sigma_dimension > 0:
self.assertIsNotNone(var)
self.assertEqual((2, 5), emb.shape)
self.assertEqual(5, vc.shape)
self.assertEqual((2, 2), sigma.shape)
self.assertEqual((2, 2, 5), input_embed.shape)
else:
self.assertAllClose([[1] * 5, [1] * 5], emb)
self.assertIsNone(vc)
self.assertIsNone(sigma)
self.assertEqual((2, 2, 5), input_embed.shape)
@parameterized.named_parameters(('with_sigma', 3), ('without_sigma', 0))
def test_single_feature_lookup_1D(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
fea_embed = sparse_features.SparseFeatureEmbedding(
config, {'fea': (5, sigma_dimension)},
op_name='single_feature_%d' % sigma_dimension,
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup(['input1', 'input2'])
if sigma_dimension > 0:
self.assertEqual((2, 5), embed.shape)
else:
self.assertAllClose([[1] * 5, [1] * 5], embed)
self.assertEqual(['fea'], list(embed_map.keys()))
self.assertEqual((2, 5), embed_map['fea'].shape)
self.assertEqual(['fea'], list(fea_embed._variable_map.keys()))
@parameterized.named_parameters(('with_sigma', 3), ('without_sigma', 0))
def test_single_feature_lookup_2D(self, sigma_dimension):
emb_dim = 5 + sigma_dimension
config = test_util.default_de_config(emb_dim, [1] * emb_dim)
fea_embed = sparse_features.SparseFeatureEmbedding(
config, {'fea': (5, sigma_dimension)},
op_name='single_feature_%d' % sigma_dimension,
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup([['input1', ''],
['input2', 'input3']])
if sigma_dimension > 0:
self.assertEqual((2, 5), embed.shape)
else:
self.assertAllClose([[1] * 5, [1] * 5], embed)
self.assertEqual(['fea'], list(embed_map.keys()))
self.assertEqual((2, 2, 5), embed_map['fea'].shape)
self.assertEqual(['fea'], list(fea_embed._variable_map.keys()))
def test_multiple_feature_lookup_1D_with_sigma(self):
fea_embed = sparse_features.SparseFeatureEmbedding(
self._config, {
'fea1': (5, 1),
'fea2': (10, 1)
},
op_name='multiple_feature0',
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup({
'fea1': ['input1', 'input2'],
'fea2': ['input3', 'input4']
})
self.assertEqual((2, 15), embed.shape)
self.assertLen(embed_map.keys(), 2)
self.assertIn('fea1', embed_map.keys())
self.assertIn('fea2', embed_map.keys())
self.assertEqual((2, 5), embed_map['fea1'].shape)
self.assertEqual((2, 10), embed_map['fea2'].shape)
self.assertLen(fea_embed._variable_map.keys(), 2)
self.assertIn('fea1', fea_embed._variable_map.keys())
self.assertIn('fea2', fea_embed._variable_map.keys())
def test_multiple_feature_lookup_1D_without_sigma(self):
config = test_util.default_de_config(5, [1] * 5)
fea_embed = sparse_features.SparseFeatureEmbedding(
config, {
'fea1': (5, 0),
'fea2': (5, 0)
},
op_name='multiple_feature1',
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup({
'fea1': ['input1', 'input2'],
'fea2': ['input3', 'input4']
})
self.assertAllClose([[1] * 10, [1] * 10], embed.numpy())
self.assertLen(embed_map.keys(), 2)
self.assertIn('fea1', embed_map.keys())
self.assertIn('fea2', embed_map.keys())
self.assertEqual((2, 5), embed_map['fea1'].shape)
self.assertEqual((2, 5), embed_map['fea2'].shape)
self.assertLen(fea_embed._variable_map.keys(), 2)
self.assertIn('fea1', fea_embed._variable_map.keys())
self.assertIn('fea2', fea_embed._variable_map.keys())
def test_multiple_feature_lookup_2D_with_sigma(self):
fea_embed = sparse_features.SparseFeatureEmbedding(
self._config, {
'fea1': (5, 1),
'fea2': (10, 1)
},
op_name='multiple_feature2',
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup({
'fea1': [['input1', ''], ['input2', '']],
'fea2': [['input3', 'input5'], ['input4', 'input6']]
})
self.assertEqual((2, 15), embed.shape)
self.assertLen(embed_map.keys(), 2)
self.assertIn('fea1', embed_map.keys())
self.assertIn('fea2', embed_map.keys())
self.assertEqual((2, 2, 5), embed_map['fea1'].shape)
self.assertEqual((2, 2, 10), embed_map['fea2'].shape)
self.assertLen(fea_embed._variable_map.keys(), 2)
self.assertIn('fea1', fea_embed._variable_map.keys())
self.assertIn('fea2', fea_embed._variable_map.keys())
def test_multiple_feature_lookup_2D_without_sigma(self):
config = test_util.default_de_config(5, [1] * 5)
fea_embed = sparse_features.SparseFeatureEmbedding(
config, {
'fea1': (5, 0),
'fea2': (5, 0)
},
op_name='multiple_feature3',
service_address=self._kbs_address)
embed, _, _, embed_map = fea_embed.lookup({
'fea1': [['input1', ''], ['input2', '']],
'fea2': [['input3', 'input5'], ['input4', 'input6']]
})
self.assertAllClose([[1] * 10, [1] * 10], embed.numpy())
self.assertLen(embed_map.keys(), 2)
self.assertIn('fea1', embed_map.keys())
self.assertIn('fea2', embed_map.keys())
self.assertEqual((2, 2, 5), embed_map['fea1'].shape)
self.assertEqual((2, 2, 5), embed_map['fea2'].shape)
self.assertLen(fea_embed._variable_map.keys(), 2)
self.assertIn('fea1', fea_embed._variable_map.keys())
self.assertIn('fea2', fea_embed._variable_map.keys())
def test_training_logistic(self):
self._config.gradient_descent_config.learning_rate = 0.05
fea_embed = sparse_features.SparseFeatureEmbedding(
self._config, {
'weather': (10, 2),
'day_of_week': (10, 2)
},
op_name='multiple_feature',
service_address=self._kbs_address)
model = tf.keras.models.Sequential(
[fea_embed, tf.keras.layers.Dense(2, activation='softmax')])
model.compile(
optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# Create an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.05)
x_train = {
'weather': [['cold', 'day'], ['hot', ''], ['warm', 'day'], ['warm', '']],
'day_of_week': [['monday', 'day'], ['tuesday', 'day'], ['sunday', ''], ['saturday', '']],
}
y_train = np.array([[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])
# Test the shape of model's output.
self.assertEqual((4, 2), model(x_train).shape)
loss_layer = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
logits = model(x_train)
init_loss = loss_layer(logits, y_train)
for _ in range(10):
with tf.GradientTape() as tape:
logits = model(x_train)
loss = loss_layer(logits, y_train)
grads = tape.gradient(loss, model.trainable_weights)
# Update the trainable variables w.r.t. the logistic loss
optimizer.apply_gradients(zip(grads, model.trainable_weights))
print('===>loss: ', loss_layer(logits, y_train).numpy())
# Checks the loss is dropped after 10 steps of training.
logits = model(x_train)
final_loss = loss_layer(logits, y_train)
self.assertLess(final_loss.numpy(), init_loss.numpy())
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "1df8aaef210d37590e3a5925f38614f7",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 82,
"avg_line_length": 41.50662251655629,
"alnum_prop": 0.5926605504587156,
"repo_name": "tensorflow/neural-structured-learning",
"id": "a03c37c4ccddcb1800869427d5f37e8ce0ad3b91",
"size": "13110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/carls/models/caml/sparse_features_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "550872"
},
{
"name": "Jupyter Notebook",
"bytes": "374227"
},
{
"name": "Python",
"bytes": "1336902"
},
{
"name": "Shell",
"bytes": "3900"
},
{
"name": "Starlark",
"bytes": "102744"
}
],
"symlink_target": ""
} |
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
# We only run the cloud run tests in py39 session.
"ignored_versions": ["2.7", "3.6", "3.7", "3.8"],
"enforce_type_hints": True,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
| {
"content_hash": "cc25ec1d6786768173915d9c8cd96fc2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 49.4,
"alnum_prop": 0.6747638326585695,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "07aae85e9d0b1549ef7b111c2972455ac55e6430",
"size": "1575",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "run/idp-sql/noxfile_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
from django.views.generic.base import View
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.utils import translation
class ChangeLanguage(View):
def get(self, request, code):
if translation.check_for_language(code):
request.session[translation.LANGUAGE_SESSION_KEY] = code
translation.activate(code)
return HttpResponseRedirect(reverse('www:index'))
| {
"content_hash": "99dff0d42060165bfe12b09ae7b7b572",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 37.083333333333336,
"alnum_prop": 0.7438202247191011,
"repo_name": "WebArchivCZ/Seeder",
"id": "37446fa54d5b75ff9923d8f21d9801f0730fd87c",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Seeder/www/views_non_localized.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40916"
},
{
"name": "HTML",
"bytes": "191411"
},
{
"name": "JavaScript",
"bytes": "35092"
},
{
"name": "PHP",
"bytes": "996"
},
{
"name": "Python",
"bytes": "298522"
},
{
"name": "Shell",
"bytes": "691"
}
],
"symlink_target": ""
} |
from django.core import validators
from django.utils.translation import gettext_lazy as _
no_slash_validator = validators.RegexValidator(r'^(?u)[^/]+$',
_("Slash is not an allowed "
"character."),
code="noslash")
| {
"content_hash": "2ec8a09342c5e19f6b7c37dc5bcb3254",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 44.75,
"alnum_prop": 0.43575418994413406,
"repo_name": "openstack/horizon",
"id": "a3427823de397afabc1bc77f706bf2a5cd08b4c0",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/containers/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
} |
import unittest
from semantic_version import Version, SimpleSpec, NpmSpec
from .setup_django import django_loaded
if django_loaded: # pragma: no cover
from semantic_version import django_fields
from .django_test_app import models
from django.core import serializers
from django.core.management import call_command
from django.db import connection
from django.test import TestCase as DjangoTestCase
from django.test import TransactionTestCase
from django.test import runner as django_test_runner
from django.test import utils as django_test_utils
else:
DjangoTestCase = unittest.TestCase
TransactionTestCase = unittest.TestCase
test_state = {}
def setUpModule():
if not django_loaded: # pragma: no cover
raise unittest.SkipTest("Django not installed")
django_test_utils.setup_test_environment()
runner = django_test_runner.DiscoverRunner()
runner_state = runner.setup_databases()
test_state.update({
'runner': runner,
'runner_state': runner_state,
})
def tearDownModule():
if not django_loaded: # pragma: no cover
return
runner = test_state['runner']
runner_state = test_state['runner_state']
runner.teardown_databases(runner_state)
django_test_utils.teardown_test_environment()
# the refresh_from_db method only came in with 1.8, so in order to make this
# work with all supported versions we have our own function.
def save_and_refresh(obj):
"""Saves an object, and refreshes from the database."""
obj.save()
obj = obj.__class__.objects.get(id=obj.id)
@unittest.skipIf(not django_loaded, "Django not installed")
class DjangoFieldTestCase(unittest.TestCase):
def test_version(self):
obj = models.VersionModel(
version=Version('0.1.1'),
spec=SimpleSpec('==0.1.1,!=0.1.1-alpha'),
npm_spec=NpmSpec('1.2 - 2.3'),
)
self.assertEqual(Version('0.1.1'), obj.version)
self.assertEqual(SimpleSpec('==0.1.1,!=0.1.1-alpha'), obj.spec)
self.assertEqual(NpmSpec('1.2 - 2.3'), obj.npm_spec)
alt_obj = models.VersionModel(version=obj.version, spec=obj.spec, npm_spec=obj.npm_spec)
self.assertEqual(Version('0.1.1'), alt_obj.version)
self.assertEqual(SimpleSpec('==0.1.1,!=0.1.1-alpha'), alt_obj.spec)
self.assertEqual(obj.spec, alt_obj.spec)
self.assertEqual(obj.npm_spec, alt_obj.npm_spec)
self.assertEqual(obj.version, alt_obj.version)
def test_version_clean(self):
"""Calling .full_clean() should convert str to Version/Spec objects."""
obj = models.VersionModel(version='0.1.1', spec='==0.1.1,!=0.1.1-alpha', npm_spec='1.x')
obj.full_clean()
self.assertEqual(Version('0.1.1'), obj.version)
self.assertEqual(SimpleSpec('==0.1.1,!=0.1.1-alpha'), obj.spec)
self.assertEqual(NpmSpec('1.x'), obj.npm_spec)
def test_version_save(self):
"""Test saving object with a VersionField."""
# first test with a null value
obj = models.PartialVersionModel()
self.assertIsNone(obj.id)
self.assertIsNone(obj.optional)
save_and_refresh(obj)
self.assertIsNotNone(obj.id)
self.assertIsNone(obj.optional)
# now set to something that is not null
version = Version('1.2.3')
obj.optional = version
save_and_refresh(obj)
self.assertEqual(obj.optional, version)
def test_spec_save(self):
"""Test saving object with a SpecField."""
# first test with a null value
obj = models.PartialVersionModel()
self.assertIsNone(obj.id)
self.assertIsNone(obj.optional_spec)
save_and_refresh(obj)
self.assertIsNotNone(obj.id)
self.assertIsNone(obj.optional_spec)
# now set to something that is not null
spec = SimpleSpec('==0,!=0.2')
obj.optional_spec = spec
save_and_refresh(obj)
self.assertEqual(obj.optional_spec, spec)
def test_partial_spec_clean(self):
obj = models.VersionModel(version='0.1.1', spec='==0,!=0.2')
obj.full_clean()
self.assertEqual(Version('0.1.1'), obj.version)
self.assertEqual(SimpleSpec('==0,!=0.2'), obj.spec)
def test_coerce_clean(self):
obj = models.CoerceVersionModel(version='0.1.1a+2', partial='23')
obj.full_clean()
self.assertEqual(Version('0.1.1-a+2'), obj.version)
self.assertEqual(Version('23', partial=True), obj.partial)
obj2 = models.CoerceVersionModel(version='23', partial='0.1.2.3.4.5/6')
obj2.full_clean()
self.assertEqual(Version('23.0.0'), obj2.version)
self.assertEqual(Version('0.1.2+3.4.5-6', partial=True), obj2.partial)
def test_invalid_input(self):
v = models.VersionModel(version='0.1.1', spec='blah')
self.assertRaises(ValueError, v.full_clean)
v2 = models.VersionModel(version='0.1', spec='==0.1.1,!=0.1.1-alpha')
self.assertRaises(ValueError, v2.full_clean)
def test_partial(self):
obj = models.PartialVersionModel(partial=Version('0.1.0'))
self.assertEqual(Version('0.1.0', partial=True), obj.partial)
self.assertIsNone(obj.optional)
self.assertIsNone(obj.optional_spec)
# Copy values to another model
alt_obj = models.PartialVersionModel(
partial=obj.partial,
optional=obj.optional,
optional_spec=obj.optional_spec,
)
self.assertEqual(Version('0.1.0', partial=True), alt_obj.partial)
self.assertEqual(obj.partial, alt_obj.partial)
self.assertIsNone(obj.optional)
self.assertIsNone(obj.optional_spec)
# Validation should be fine
obj.full_clean()
def test_serialization(self):
o1 = models.VersionModel(
version=Version('0.1.1'),
spec=SimpleSpec('==0.1.1,!=0.1.1-alpha'),
npm_spec=NpmSpec('1.2 - 2.3'),
)
o2 = models.VersionModel(
version=Version('0.4.3-rc3+build3'),
spec=SimpleSpec('<=0.1.1-rc2,!=0.1.1-rc1'),
npm_spec=NpmSpec('1.2 - 2.3'),
)
data = serializers.serialize('json', [o1, o2])
obj1, obj2 = serializers.deserialize('json', data)
self.assertEqual(o1.version, obj1.object.version)
self.assertEqual(o1.spec, obj1.object.spec)
self.assertEqual(o1.npm_spec, obj1.object.npm_spec)
self.assertEqual(o2.version, obj2.object.version)
self.assertEqual(o2.spec, obj2.object.spec)
self.assertEqual(o2.npm_spec, obj2.object.npm_spec)
def test_serialization_partial(self):
o1 = models.PartialVersionModel(
partial=Version('0.1.1', partial=True),
optional=Version('0.2.4-rc42', partial=True),
optional_spec=None,
)
o2 = models.PartialVersionModel(
partial=Version('0.4.3-rc3+build3', partial=True),
optional='',
optional_spec=SimpleSpec('==0.1.1,!=0.1.1-alpha'),
)
data = serializers.serialize('json', [o1, o2])
obj1, obj2 = serializers.deserialize('json', data)
self.assertEqual(o1.partial, obj1.object.partial)
self.assertEqual(o1.optional, obj1.object.optional)
self.assertEqual(o2.partial, obj2.object.partial)
self.assertEqual(o2.optional, obj2.object.optional)
@unittest.skipIf(not django_loaded, "Django not installed")
class FieldMigrationTests(DjangoTestCase):
def test_version_field(self):
field = django_fields.VersionField(
partial=True,
coerce=True,
)
expected = {
'coerce': True,
'partial': True,
'max_length': 200,
}
self.assertEqual(field.deconstruct()[3], expected)
def test_spec_field(self):
field = django_fields.SpecField()
expected = {'max_length': 200}
self.assertEqual(field.deconstruct()[3], expected)
def test_nondefault_spec_field(self):
field = django_fields.SpecField(syntax='npm')
expected = {'max_length': 200, 'syntax': 'npm'}
self.assertEqual(field.deconstruct()[3], expected)
@unittest.skipIf(not django_loaded, "Django not installed")
class FullMigrateTests(TransactionTestCase):
def test_migrate(self):
# Let's check that this does not crash
call_command('makemigrations', verbosity=0)
call_command('migrate', verbosity=0)
with connection.cursor() as cursor:
table_list = [t.name for t in connection.introspection.get_table_list(cursor)]
self.assertIn('django_test_app_versionmodel', table_list)
@unittest.skipIf(not django_loaded, "Django not installed")
class DbInteractingTestCase(DjangoTestCase):
def test_db_interaction(self):
o1 = models.VersionModel(version=Version('0.1.1'), spec=SimpleSpec('<0.2.4-rc42'))
o2 = models.VersionModel(version=Version('0.4.3-rc3+build3'), spec=SimpleSpec('==0.4.3'))
o1.save()
o2.save()
obj1 = models.VersionModel.objects.get(pk=o1.pk)
self.assertEqual(o1.version, obj1.version)
obj2 = models.VersionModel.objects.get(pk=o2.pk)
self.assertEqual(o2.version, obj2.version)
def test_get_or_create(self):
o1, created = models.VersionModel.objects.get_or_create(version=Version('0.1.1'), spec=SimpleSpec('==0.4.3'))
self.assertTrue(created)
self.assertIsNotNone(o1.pk)
self.assertEqual(Version('0.1.1'), o1.version)
self.assertEqual(SimpleSpec('==0.4.3'), o1.spec)
o2, created = models.VersionModel.objects.get_or_create(version=Version('0.1.1'), spec=SimpleSpec('==0.4.3'))
self.assertFalse(created)
self.assertEqual(o1, o2)
self.assertEqual(o1.pk, o2.pk)
self.assertEqual(Version('0.1.1'), o2.version)
self.assertEqual(SimpleSpec('==0.4.3'), o2.spec)
| {
"content_hash": "ea4cff7a9d8a71a8ff945ce40313333a",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 117,
"avg_line_length": 36.89010989010989,
"alnum_prop": 0.6349915599245358,
"repo_name": "rbarrois/python-semanticversion",
"id": "3361a9bf0bd41c2871b3d79a8b14652b198535f7",
"size": "10208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_django.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2280"
},
{
"name": "Python",
"bytes": "111378"
}
],
"symlink_target": ""
} |
import os
import sys
import yaml
from fabric.api import *
class Config(object):
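"""Load deployment configuration from a YAML file.
Reads the 'config', 'roles' and 'servers' sections, fills in defaults,
derives an 'all_servers' role plus per-server settings, and configures the
Fabric environment (roledefs, parallel mode, SSH options) accordingly."""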
def __init__(self, configfile):
self._config = self.__set_config(configfile)
self._roles = self.__set_roles(configfile)
self._servers = self.__set_servers(configfile)
self.__prepare()
if os.environ["DEBUG"] <> "":
for server in self._servers:
sys.stderr.write("server: %s\n" % server)
for kv in self._servers[server]:
sys.stderr.write("__init__ server: [[%s]] - key: [[%s]] - value: [[%s]]\n" % (server, kv, self._servers[server][kv]))
self.__setup_fabric_env()
if os.environ["DEBUG"] <> "":
self.__dumpconfig()
def __dumpconfig(self):
for role in sorted(self._roles):
sys.stderr.write("role: %s\n" % role)
sys.stderr.write("\n")
for server in self._servers:
sys.stderr.write("server: %s\n" % server)
for kv in self._servers[server]:
sys.stderr.write("__dumpconfig server: [[%s]] - key: [[%s]] - value: [[%s]]\n" % (server, kv, self._servers[server][kv]))
for server in sorted(self._servers):
if 'ip' in self._servers[server]:
sys.stderr.write("server: %s (%s)\n" % (server, self._servers[server]['ip']))
else:
sys.stderr.write("ERROR: server %s has no ip property\n" % server)
sys.exit(1)
for role in sorted(self._roles):
if server in self._roles[role]:
sys.stderr.write("server role: %s\n" % role)
sys.stderr.write("\n")
@classmethod
def __read_from_yaml(cls, yamlfile, section_name):
with open(yamlfile, 'r') as yaml_file:
yamldata = yaml.load(yaml_file.read())
if yamldata and section_name in yamldata:
if os.environ["DEBUG"] <> "":
print
print yamldata[section_name]
print
return yamldata[section_name]
else:
return {}
def __set_config(self, configfile):
return self.__read_from_yaml(configfile, 'config')
def __set_roles(self, configfile):
return self.__read_from_yaml(configfile, 'roles')
def __set_servers(self, configfile):
return self.__read_from_yaml(configfile, 'servers')
def __prepare_config(self):
defaults = {}
defaults["nameserver"] = "8.8.8.8"
defaults["parallel"] = True
defaults["underlay_mtu"] = 9000
defaults["overlay_mtu"] = 8950
defaults["iperf3_througput_test_sequence"] = "400M 40M 4M"
defaults["iperf3_test_iterations"] = "2 4 6"
defaults["iperf3_retry_factor"] = 1000
defaults["iperf3_retry_wait"] = 10
if self._config['debug'] == True:
defaults["maxpairs"] = 20
else:
defaults["maxpairs"] = 1000
for overloading_key in defaults:
if overloading_key not in self._config:
overloading_value = defaults[overloading_key]
self._config[overloading_key] = overloading_value
def __prepare_roles(self):
self._roles['all_servers'] = []
for server in self._servers:
if server not in self._roles['all_servers']:
self._roles['all_servers'].append(server)
def __prepare_servers(self):
for role in self._roles:
for server in self._roles[role]:
if server not in self._servers:
self._servers[server] = {}
if 'roles' not in self._servers[server]:
self._servers[server]['roles'] = []
if role not in self._servers[server]['roles']:
self._servers[server]['roles'].append(role)
for server in self._servers:
for global_config_key in self._config:
if global_config_key not in self._servers[server]:
value = self._config[global_config_key]
self._servers[server][global_config_key] = value
def __prepare(self):
self.__prepare_config()
self.__prepare_roles()
self.__prepare_servers()
def __setup_fabric_env(self):
env.use_ssh_config = True
env.port = 22
env.connection_attempts = 5
env.timeout = 5
env.parallel = self._config["parallel"]
env.roledefs = self._roles
@property
def config(self):
return self._config
@property
def roles(self):
return self._roles
@property
def servers(self):
return self._servers
| {
"content_hash": "d504e5f1a9a543c2b441bd2b9e651653",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 137,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.5399531216705732,
"repo_name": "midonet/senbazuru",
"id": "7d5e1845fd334a33bd3f604da5134efea6edd4aa",
"size": "5327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/senbazuru/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5663"
},
{
"name": "Python",
"bytes": "49622"
}
],
"symlink_target": ""
} |
"""
WSGI config for idneo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "idneo.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "a4a3809e57f232a132ea94069e50d23b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.5,
"alnum_prop": 0.7714285714285715,
"repo_name": "pollitosabroson/idneo",
"id": "37eba92157a4264a275efb6bf7bb034bb674d29d",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/idneo/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5984"
},
{
"name": "Python",
"bytes": "18427"
},
{
"name": "Shell",
"bytes": "4961"
}
],
"symlink_target": ""
} |
__version__=''' $Id$ '''
__doc__='''Classes for ParagraphStyle and similar things.
A style is a collection of attributes, but with some extra features
to allow 'inheritance' from a parent, and to ensure nobody makes
changes after construction.
ParagraphStyle shows all the attributes available for formatting
paragraphs.
getSampleStyleSheet() returns a stylesheet you can use for initial
development, with a few basic heading and text styles.
'''
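# Illustrative usage sketch (not part of the original module) of the
# stylesheet and style inheritance described above; 'Quote' is an example
# name introduced here only for demonstration:
#   styles = getSampleStyleSheet()
#   body = styles['BodyText']                  # lookup by name
#   h1 = styles['h1']                          # lookup by alias
#   quote = body.clone('Quote', leftIndent=36) # derive a new style from an existing one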
__all__=(
'PropertySet',
'ParagraphStyle',
'LineStyle',
'ListStyle',
'StyleSheet1',
'getSampleStyleSheet',
)
from reportlab.lib.colors import white, black
from reportlab.lib.enums import TA_LEFT, TA_CENTER
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName, baseUnderlineProportion as _baseUnderlineProportion
_baseFontNameB = tt2ps(_baseFontName,1,0)
_baseFontNameI = tt2ps(_baseFontName,0,1)
_baseFontNameBI = tt2ps(_baseFontName,1,1)
###########################################################
# This class provides an 'instance inheritance'
# mechanism for its descendants, simpler than acquisition
# but not as far-reaching
###########################################################
class PropertySet:
defaults = {}
def __init__(self, name, parent=None, **kw):
"""When initialized, it copies the class defaults;
then takes a copy of the attributes of the parent
if any. All the work is done in init - styles
should cost little to use at runtime."""
# step one - validate the hell out of it
assert 'name' not in self.defaults, "Class Defaults may not contain a 'name' attribute"
assert 'parent' not in self.defaults, "Class Defaults may not contain a 'parent' attribute"
if parent:
assert parent.__class__ == self.__class__, "Parent style %s must have same class as new style %s" % (parent.__class__.__name__,self.__class__.__name__)
#step two
self.name = name
self.parent = parent
self.__dict__.update(self.defaults)
#step two - copy from parent if any. Try to be
# very strict that only keys in class defaults are
# allowed, so they cannot inherit
self.refresh()
self._setKwds(**kw)
def _setKwds(self,**kw):
#step three - copy keywords if any
for key, value in kw.items():
self.__dict__[key] = value
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def refresh(self):
"""re-fetches attributes from the parent on demand;
use if you have been hacking the styles. This is
used by __init__"""
if self.parent:
for key, value in self.parent.__dict__.items():
if (key not in ['name','parent']):
self.__dict__[key] = value
def listAttrs(self, indent=''):
print(indent + 'name =', self.name)
print(indent + 'parent =', self.parent)
keylist = list(self.__dict__.keys())
keylist.sort()
keylist.remove('name')
keylist.remove('parent')
for key in keylist:
value = self.__dict__.get(key, None)
print(indent + '%s = %s' % (key, value))
def clone(self, name, parent=None, **kwds):
r = self.__class__(name,parent)
r.__dict__ = self.__dict__.copy()
r.name = name
r.parent = parent is None and self or parent
r._setKwds(**kwds)
return r
class ParagraphStyle(PropertySet):
defaults = {
'fontName':_baseFontName,
'fontSize':10,
'leading':12,
'leftIndent':0,
'rightIndent':0,
'firstLineIndent':0,
'alignment':TA_LEFT,
'spaceBefore':0,
'spaceAfter':0,
'bulletFontName':_baseFontName,
'bulletFontSize':10,
'bulletIndent':0,
#'bulletColor':black,
'textColor': black,
'backColor':None,
'wordWrap':None, #None means do nothing special
#CJK use Chinese Line breaking
#LTR RTL use left to right / right to left
#with support from pyfribi2 if available
'borderWidth': 0,
'borderPadding': 0,
'borderColor': None,
'borderRadius': None,
'allowWidows': 1,
'allowOrphans': 0,
'textTransform':None, #uppercase, lowercase (capitalize not yet) or None or absent
'endDots':None, #dots on the last line of left/right justified paras
#string or object with text and optional fontName, fontSize, textColor & backColor
#dy
'splitLongWords':1, #make best efforts to split long words
'underlineProportion': _baseUnderlineProportion, #set to non-zero to make the underline proportional to the font size
'bulletAnchor': 'start', #where the bullet is anchored ie start, middle, end or numeric
'justifyLastLine': 0, #n: allow justification on the last line when it has more than n words; 0 means don't bother
'justifyBreaks': 0, #justify lines broken with <br/>
}
class LineStyle(PropertySet):
defaults = {
'width':1,
'color': black
}
def prepareCanvas(self, canvas):
"""You can ask a LineStyle to set up the canvas for drawing
the lines."""
canvas.setLineWidth(1)
#etc. etc.
class ListStyle(PropertySet):
defaults = dict(
leftIndent=18,
rightIndent=0,
bulletAlign='left',
bulletType='1',
bulletColor=black,
bulletFontName='Helvetica',
bulletFontSize=12,
bulletOffsetY=0,
bulletDedent='auto',
bulletDir='ltr',
bulletFormat=None,
start=None, #starting value for a list
)
_stylesheet1_undefined = object()
class StyleSheet1:
"""
This may or may not be used. The idea is to:
1. slightly simplify construction of stylesheets;
2. enforce rules to validate styles when added
(e.g. we may choose to disallow having both
'heading1' and 'Heading1' - actual rules are
open to discussion);
3. allow aliases and alternate style lookup
mechanisms
4. Have a place to hang style-manipulation
methods (save, load, maybe support a GUI
editor)
Access is via getitem, so they can be
compatible with plain old dictionaries.
"""
def __init__(self):
self.byName = {}
self.byAlias = {}
def __getitem__(self, key):
try:
return self.byAlias[key]
except KeyError:
try:
return self.byName[key]
except KeyError:
raise KeyError("Style '%s' not found in stylesheet" % key)
def get(self,key,default=_stylesheet1_undefined):
try:
return self[key]
except KeyError:
if default!=_stylesheet1_undefined: return default
raise
def __contains__(self, key):
return key in self.byAlias or key in self.byName
def has_key(self,key):
return key in self
def add(self, style, alias=None):
key = style.name
if key in self.byName:
raise KeyError("Style '%s' already defined in stylesheet" % key)
if key in self.byAlias:
raise KeyError("Style name '%s' is already an alias in stylesheet" % key)
if alias:
if alias in self.byName:
raise KeyError("Style '%s' already defined in stylesheet" % alias)
if alias in self.byAlias:
raise KeyError("Alias name '%s' is already an alias in stylesheet" % alias)
#passed all tests? OK, add it
self.byName[key] = style
if alias:
self.byAlias[alias] = style
def list(self):
styles = list(self.byName.items())
styles.sort()
alii = {}
for (alias, style) in list(self.byAlias.items()):
alii[style] = alias
for (name, style) in styles:
alias = alii.get(style, None)
print(name, alias)
style.listAttrs(' ')
print()
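# Illustrative sketch (not part of the original module): adding a style to a
# StyleSheet1 under an alias and looking it up both ways. The 'Note'/'nt'
# names are examples only.
def _example_stylesheet1_usage():
    sheet = StyleSheet1()
    sheet.add(ParagraphStyle(name='Note', fontName=_baseFontName, fontSize=8), alias='nt')
    # Name and alias lookups resolve to the same style object.
    assert sheet['Note'] is sheet['nt']
    return sheet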
def testStyles():
pNormal = ParagraphStyle('Normal',None)
pNormal.fontName = _baseFontName
pNormal.fontSize = 12
pNormal.leading = 14.4
pNormal.listAttrs()
print()
pPre = ParagraphStyle('Literal', pNormal)
pPre.fontName = 'Courier'
pPre.listAttrs()
return pNormal, pPre
def getSampleStyleSheet():
"""Returns a stylesheet object"""
stylesheet = StyleSheet1()
stylesheet.add(ParagraphStyle(name='Normal',
fontName=_baseFontName,
fontSize=10,
leading=12)
)
stylesheet.add(ParagraphStyle(name='BodyText',
parent=stylesheet['Normal'],
spaceBefore=6)
)
stylesheet.add(ParagraphStyle(name='Italic',
parent=stylesheet['BodyText'],
fontName = _baseFontNameI)
)
stylesheet.add(ParagraphStyle(name='Heading1',
parent=stylesheet['Normal'],
fontName = _baseFontNameB,
fontSize=18,
leading=22,
spaceAfter=6),
alias='h1')
stylesheet.add(ParagraphStyle(name='Title',
parent=stylesheet['Normal'],
fontName = _baseFontNameB,
fontSize=18,
leading=22,
alignment=TA_CENTER,
spaceAfter=6),
alias='title')
stylesheet.add(ParagraphStyle(name='Heading2',
parent=stylesheet['Normal'],
fontName = _baseFontNameB,
fontSize=14,
leading=18,
spaceBefore=12,
spaceAfter=6),
alias='h2')
stylesheet.add(ParagraphStyle(name='Heading3',
parent=stylesheet['Normal'],
fontName = _baseFontNameBI,
fontSize=12,
leading=14,
spaceBefore=12,
spaceAfter=6),
alias='h3')
stylesheet.add(ParagraphStyle(name='Heading4',
parent=stylesheet['Normal'],
fontName = _baseFontNameBI,
fontSize=10,
leading=12,
spaceBefore=10,
spaceAfter=4),
alias='h4')
stylesheet.add(ParagraphStyle(name='Heading5',
parent=stylesheet['Normal'],
fontName = _baseFontNameB,
fontSize=9,
leading=10.8,
spaceBefore=8,
spaceAfter=4),
alias='h5')
stylesheet.add(ParagraphStyle(name='Heading6',
parent=stylesheet['Normal'],
fontName = _baseFontNameB,
fontSize=7,
leading=8.4,
spaceBefore=6,
spaceAfter=2),
alias='h6')
stylesheet.add(ParagraphStyle(name='Bullet',
parent=stylesheet['Normal'],
firstLineIndent=0,
spaceBefore=3),
alias='bu')
stylesheet.add(ParagraphStyle(name='Definition',
parent=stylesheet['Normal'],
firstLineIndent=0,
leftIndent=36,
bulletIndent=0,
spaceBefore=6,
bulletFontName=_baseFontNameBI),
alias='df')
stylesheet.add(ParagraphStyle(name='Code',
parent=stylesheet['Normal'],
fontName='Courier',
fontSize=8,
leading=8.8,
firstLineIndent=0,
leftIndent=36))
return stylesheet
| {
"content_hash": "4287bc8bcdf6d6ae8fd0727311072289",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 163,
"avg_line_length": 37.07282913165266,
"alnum_prop": 0.4989044200982244,
"repo_name": "Distrotech/reportlab",
"id": "cb16f5531449fc22a1ec3dccd4e7bf93cb8c10e2",
"size": "13418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/reportlab/lib/styles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "721758"
},
{
"name": "C++",
"bytes": "668"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2988317"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
} |
from corehq.apps.sms.models import MessagingSubEvent, MessagingEvent
from corehq.util.metrics import metrics_counter
def handle_email_messaging_subevent(message, subevent_id):
try:
subevent = MessagingSubEvent.objects.get(id=subevent_id)
except MessagingSubEvent.DoesNotExist:
return
event_type = message.get('eventType')
if event_type == 'Bounce':
additional_error_text = ''
bounce_type = message.get('bounce', {}).get('bounceType')
if bounce_type:
additional_error_text = f"{bounce_type}."
bounced_recipients = message.get('bounce', {}).get('bouncedRecipients', [])
recipient_addresses = []
for bounced_recipient in bounced_recipients:
recipient_addresses.append(bounced_recipient.get('emailAddress'))
if recipient_addresses:
additional_error_text = f"{additional_error_text} - {', '.join(recipient_addresses)}"
metrics_counter('commcare.messaging.email.bounced', len(bounced_recipients), tags={
'domain': subevent.parent.domain,
})
subevent.error(MessagingEvent.ERROR_EMAIL_BOUNCED,
additional_error_text=additional_error_text)
elif event_type == 'Send':
subevent.status = MessagingEvent.STATUS_EMAIL_SENT
elif event_type == 'Delivery':
subevent.status = MessagingEvent.STATUS_EMAIL_DELIVERED
subevent.additional_error_text = message.get('delivery', {}).get('timestamp')
subevent.save()
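# Illustrative example of the message shape this handler expects (only the
# keys read above; the values are hypothetical):
# {
#     'eventType': 'Bounce',
#     'bounce': {
#         'bounceType': 'Permanent',
#         'bouncedRecipients': [{'emailAddress': 'user@example.com'}],
#     },
# }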
| {
"content_hash": "d3755c0951d725652cc55a86ea4b8be0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 97,
"avg_line_length": 41.76315789473684,
"alnum_prop": 0.632010081915564,
"repo_name": "dimagi/commcare-hq",
"id": "b8e4b5b036c4a2ca25b9d31f58e6b4bd9507e1c9",
"size": "1587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/sms/event_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
"""Build attributes of every testcase."""
import datetime
import sys
import six
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from local.butler.scripts import attribute_builder
from local.butler.scripts import batcher
BATCH_SIZE = 500
def to_dict(entity):
"""Convert a db.Model instance to a dict."""
entity_dict = entity.to_dict()
entity_dict['id'] = entity.key.id()
for k, v in six.iteritems(entity_dict):
if isinstance(v, datetime.datetime):
entity_dict[k] = utils.utc_datetime_to_timestamp(v)
return entity_dict
def get_diff(before, after):
"""Return differences in string between the two dicts, before and after."""
diffs = []
for k, v in six.iteritems(before):
if k in after:
if v != after[k]:
diffs.append((k, (v, after[k])))
else:
diffs.append((k, (v, '<MISSING>')))
for k, v in six.iteritems(after):
if k not in before:
diffs.append((k, ('<MISSING>', v)))
diffs.sort()
s = ''
for (key, (before_value, after_value)) in diffs:
s += '%s:\n' % key
s += '-%s\n' % before_value
s += '+%s\n\n' % after_value
return s
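# Illustrative example (not part of the original script): for
# before={'a': 1} and after={'a': 2, 'b': 3}, get_diff returns
# "a:\n-1\n+2\n\nb:\n-<MISSING>\n+3\n\n".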
def execute(args):
"""Build keywords."""
count_diff = 0
query = data_types.Testcase.query().order(-data_types.Testcase.timestamp)
for testcases in batcher.iterate(query, BATCH_SIZE):
for testcase in testcases:
before_testcase = to_dict(testcase)
attribute_builder.populate(testcase)
after_testcase = to_dict(testcase)
diff = get_diff(before_testcase, after_testcase)
if (count_diff % 10) == 0 and diff:
print('Migrate (dry=%s) id:%s\n%s' % (not args.non_dry_run,
testcase.key.id(), diff))
if diff:
count_diff += 1
if args.non_dry_run:
try:
ndb_utils.put_multi(testcases)
except Exception:
for testcase in testcases:
try:
testcase.put()
except Exception:
print('Error: %s %s' % (testcase.key.id(), sys.exc_info()))
print('Done (count_diff=%d)' % count_diff)
| {
"content_hash": "9acd7cc7a6dc42d67ad59034c22d552f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 26.253012048192772,
"alnum_prop": 0.6181734740706746,
"repo_name": "google/clusterfuzz",
"id": "bb6cb84a97c582e3744aa82e566cf9122e4c326e",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/local/butler/scripts/build_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
} |
class IrisettError(Exception):
def __str__(self) -> str:
if len(self.args) == 1:
ret = self.args[0]
else:
ret = str(self.__class__.__name__)
return ret
class InvalidArguments(IrisettError):
pass
| {
"content_hash": "ceef6523397e379215765b18f5c469ac",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 23,
"alnum_prop": 0.5296442687747036,
"repo_name": "beebyte/irisett",
"id": "d4cee3c4a8916cdaa807d1eb89d9dc4624ba5f05",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irisett/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1865"
},
{
"name": "HTML",
"bytes": "20933"
},
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "241040"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
} |
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
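# Illustrative sketch (not part of the original framework): the smallest
# possible test generator of the kind described above. The blocks passed in
# are assumed to be CBlock objects built by the calling test script.
class ExampleTestGenerator(object):
    def __init__(self, blocks):
        self.blocks = blocks
    def get_tests(self):
        # One TestInstance per block; each entry is [object, expected_outcome]
        # (see the TestInstance notes further down in this file).
        for block in self.blocks:
            yield TestInstance([[block, True]])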
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
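# Example (illustrative): wait_until(lambda: node.closed, timeout=10) polls
# the predicate every 50ms and returns True once it holds, or False if the
# timeout/attempt limit is reached first.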
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
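# Illustrative example (not part of the original framework): a TestInstance
# that delivers two blocks, expecting the first to be accepted and the second
# rejected, then checks the tip against the first block's hash:
#   TestInstance([[block1, True], [block2, False, block1.sha256]])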
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| {
"content_hash": "1062c40e18e30041de739437de1f460a",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 145,
"avg_line_length": 43.99180327868852,
"alnum_prop": 0.5887211974411527,
"repo_name": "shaulkf/bitcoin",
"id": "9444424dcf633e8303e8d1893d92b4f0df26104a",
"size": "16270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/test_framework/comptool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "774949"
},
{
"name": "C++",
"bytes": "4061069"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18445"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "69597"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "579845"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "31469"
}
],
"symlink_target": ""
} |
from django.core.management import BaseCommand
__author__ = 'tchen'
### Warning - do not use this script. I changed my mind when generating the stats. This is for real time stats,
### but for the problem we're solving it is not necessary.
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.gnats
stats = db['date_stats']
gnats = db['issues']
# consts
YEARS = range(2013, 2016)
TYPES = [('daily', 365), ('weekly', 53), ('monthly', 12)]
TYPE_FUNS = [
('daily', lambda x: x.timetuple().tm_yday),
('weekly', lambda x: x.date().isocalendar()[1]),
('monthly', lambda x: x.month)
]
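# Worked example (illustrative, not used by the command): how the TYPE_FUNS
# lambdas map a date onto the zero-based slots of the per-year arrays
# initialized below.
#   >>> from datetime import datetime
#   >>> dt = datetime(2014, 3, 9)
#   >>> [(name, fun(dt)) for name, fun in TYPE_FUNS]
#   [('daily', 68), ('weekly', 10), ('monthly', 3)]
# so update_one() below touches data.67, data.9 and data.2 for that date.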
STATS = ['created', 'resolved']
def pr_is_resolved(state):
return state in ['feedback', 'monitored', 'suspended', 'closed']
class Command(BaseCommand):
help = u'Initialize statistics data'
need_stat = False
def init_data(self, year, type, length, state):
doc = stats.find_one({'year': year, 'type': type, 'state': state})
if not doc:
self.need_stat = True
data = [0] * length
stats.insert({'year': year, 'type': type, 'state': state, 'data': data})
print 'year %s, type %s, state %s, initialized with %d data' % (year, type, state, length)
    def update_one(self, state, dt, delta=1):
        year = dt.year
        for type, fun in TYPE_FUNS:
            query = {'year': year, 'type': type, 'state': state}
            # MongoDB has no '$dec' operator; decrement by passing delta=-1 to '$inc'.
            stats.update(query, {'$inc': {'data.%d' % (fun(dt) - 1): delta}})
def update_stats(self):
print 'Total %s PRs need to be processed.' % gnats.find({'need_stat': {'$gt': 0}}).count()
for pr in gnats.find({'need_stat': {'$gt': 0}}):
if pr['need_stat'] == 1:
self.update_one('created', pr['arrived_at'])
if pr_is_resolved(pr['state']) and pr['need_stat'] == 2:
self.update_one('resolved', pr['modified_at'])
            elif not pr_is_resolved(pr['state']) and pr['need_stat'] == 3:
                self.update_one('resolved', pr['modified_at'], delta=-1)
            pr['need_stat'] = 0
            # persist the cleared flag so the PR is not re-processed on the next run
            gnats.update({'_id': pr['_id']}, {'$set': {'need_stat': 0}})
print 'All PRs processed.'
def init_stat(self):
gnats.update({}, {'$set': {'need_stat': 1}}, multi=True)
print 'All PRs are set to need re-statistics.'
def handle(self, *args, **kwargs):
for year in YEARS:
for type, length in TYPES:
for state in STATS:
self.init_data(year, type, length, state)
if self.need_stat:
self.init_stat()
self.update_stats()
| {
"content_hash": "6144b97b2e34d5c1a4ab7d00e9af9693",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 111,
"avg_line_length": 34.7027027027027,
"alnum_prop": 0.567367601246106,
"repo_name": "tyrchen/church",
"id": "12c3f65f87654cd22f75bab89439258f11de34e0",
"size": "2568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "church/management/commands/update_date_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "190791"
},
{
"name": "Python",
"bytes": "35071"
}
],
"symlink_target": ""
} |
import time
import threading
import mido
import sys
import MRQ1
#import duplexPort
# globals
MIDI_PORT = False
MIDI_note_mapping = [None] * 127
MIDI_note_mapping[91] = [MRQ1.droneSnare,16]
MIDI_note_mapping[93] = [MRQ1.droneBongo,16]
MIDI_note_mapping[95] = [MRQ1.droneBass, 16]
MIDI_note_mapping[96] = [MRQ1.droneBrush,16]
MIDI_note_mapping[98] = [MRQ1.droneBlock,16]
MIDI_note_mapping[100] = [MRQ1.triggerSnare,None]
MIDI_note_mapping[101] = [MRQ1.triggerBongo,None]
MIDI_note_mapping[105] = [MRQ1.triggerBlock,None]
MIDI_note_mapping[107] = [MRQ1.triggerBass ,None]
MIDI_note_mapping[103] = [MRQ1.triggerBrush,None]
MIDI_note_mapping[119] = [MRQ1.toggleExternalClock,None]
MIDI_note_mapping[120] = [MRQ1.togglePower,None]
MIDI_CC_mapping = [None] * 127
MIDI_CC_mapping[74] = [MRQ1.setClockOscillator,0]
MIDI_CC_mapping[71] = [MRQ1.setClockOscillator,1]
MIDI_CC_mapping[91] = [MRQ1.setClockOscillator,2]
MIDI_CC_mapping[93] = [MRQ1.setVolume,3]
MIDI_CC_mapping[73] = [MRQ1.setBalance,4]
# init simurgh
"""
# init duplex port
def TestCallback():
while True:
time.sleep(1)
#duplexPort.init(testcallback)
testcallback = threading.Thread(target=TestCallback)
testcallback.start()
duplexPort.init(testcallback)
"""
# init MIDI
def mido_init():
    midiInputs_l = mido.get_input_names()
print ">> MIDI Inputs", midiInputs_l
if len(midiInputs_l) < 2:
print "MIDI inputs not found. Check USB connection."
sys.exit(0)
else:
global MIDI_PORT
#MIDI_PORT = mido.open_output(midiOutputs_l[0])
MIDI_PORT = mido.open_input(midiInputs_l[1],callback=mapMIDI)
#MIDI_PORT = mido.open_input(midiInputs_l[0],callback=mapMIDI)
#MIDI_PORT.callback = mapMIDI
print MIDI_PORT
# MIDI mappings
def mapMIDI(msg):
print msg
if msg.type == "note_on":
mapping_l = MIDI_note_mapping[msg.note]
if mapping_l:
mapping_l[0](msg, mapping_l[1])
#fpgaParams = mapping_l[0](msg, mapping_l[1])
#print "fpgaParams",fpgaParams
#duplexPort.send(fpgaParams[0],fpgaParams[1])
if msg.type == "control_change":
mapping_l = MIDI_CC_mapping[msg.control]
if mapping_l:
mapping_l[0](msg, mapping_l[1])
#fpgaParams = mapping_l[0](msg, mapping_l[1])
#print "fpgaParams",fpgaParams
#duplexPort.send(fpgaParams[0],fpgaParams[1])
if msg.type == "note_off" and msg.note == 103:
print "asdf"
mapping_l = MIDI_note_mapping[msg.note]
print mapping_l
if mapping_l:
mapping_l[0](msg, mapping_l[1])
# signal functions
mido_init()
| {
"content_hash": "574add8a62809ae24cfded26d76e3d97",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 64,
"avg_line_length": 28.870588235294118,
"alnum_prop": 0.713121434392828,
"repo_name": "andycavatorta/RhythmBoxKing",
"id": "961b4432a93a45dc26aa0d78ff5a1306715a3137",
"size": "2455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10616"
}
],
"symlink_target": ""
} |
import os
import slack
import requests
import logging
from datetime import datetime
from get_parent import get_parent
from case_not_found import case_not_found
from googleapiclient.discovery import build_from_document
logger = logging.getLogger(__name__)
def support_add_comment(channel_id, case, comment, user_id, user_name):
"""
Add a comment to a Google Cloud support case.
Parameters
----------
channel_id : str
unique string used to idenify a Slack channel. Used to send messages to the channel
case : str
unique id of the case
comment : str
comment to be added to the case
user_id : str
the Slack user_id of the user who submitted the request. Used to send ephemeral
messages to the user
user_name : str
Slack user_name of the user that ran the command. Appended to the end of the
        comment to identify who submitted it, otherwise all comments will
show as coming from the case creator
"""
API_KEY = os.environ.get('API_KEY')
MAX_RETRIES = 3
# Get our discovery doc and build our service
r = requests.get('https://cloudsupport.googleapis.com/$discovery/rest'
'?key={}&labels=V2_TRUSTED_TESTER&version=v2beta'.format(API_KEY))
r.raise_for_status()
support_service = build_from_document(r.json())
client = slack.WebClient(token=os.environ.get('SLACK_TOKEN'))
client.chat_postEphemeral(
channel=channel_id,
user=user_id,
text="Your request is processing ...")
parent = get_parent(case)
if parent == 'Case not found':
case_not_found(channel_id, user_id, case)
else:
req_body = {
"body": (comment + '\n*Comment submitted by {} via Google Cloud Support'
' Slack bot*'.format(user_name))
}
req = support_service.cases().comments().create(parent=parent, body=req_body)
try:
req.execute(num_retries=MAX_RETRIES)
except BrokenPipeError as e:
error_message = str(e) + ' : {}'.format(datetime.now())
logger.error(error_message)
client.chat_postEphemeral(
channel=channel_id,
user=user_id,
text="Your comment may not have posted. Please try again later.")
else:
client.chat_postEphemeral(
channel=channel_id,
user=user_id,
text=f"You added a new comment on case {case}: {comment}")
if __name__ == "__main__":
channel_id = os.environ.get('TEST_CHANNEL_ID')
case = 'xxxxxxxx'
comment = "This is a test comment created by the Google Cloud Support Slackbot"
user_id = os.environ.get('TEST_USER_ID')
user_name = os.environ.get('TEST_USER_NAME')
support_add_comment(channel_id, case, comment, user_id, user_name)
case = os.environ.get('TEST_CASE')
support_add_comment(channel_id, case, comment, user_id, user_name)
| {
"content_hash": "97b0e86264e91ad26e7e8bf9c62812a1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 91,
"avg_line_length": 36.901234567901234,
"alnum_prop": 0.6283037805286049,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "b90b612a5ec369e831d094df227a6aaf30611b6f",
"size": "3587",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/google-cloud-support-slackbot/support_add_comment.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
} |
"""The tests for the Prometheus exporter."""
import asyncio
import pytest
from homeassistant.setup import async_setup_component
import homeassistant.components.prometheus as prometheus
@pytest.fixture
def prometheus_client(loop, hass, aiohttp_client):
"""Initialize an aiohttp_client with Prometheus component."""
assert loop.run_until_complete(async_setup_component(
hass,
prometheus.DOMAIN,
{prometheus.DOMAIN: {}},
))
return loop.run_until_complete(aiohttp_client(hass.http.app))
@asyncio.coroutine
def test_view(prometheus_client): # pylint: disable=redefined-outer-name
"""Test prometheus metrics view."""
resp = yield from prometheus_client.get(prometheus.API_ENDPOINT)
assert resp.status == 200
assert resp.headers['content-type'] == 'text/plain'
body = yield from resp.text()
body = body.split("\n")
assert len(body) > 3 # At least two comment lines and a metric
for line in body:
if line:
assert line.startswith('# ') \
or line.startswith('process_') \
or line.startswith('python_info')
| {
"content_hash": "00cf3c434ee7f14363e3caead808ee28",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 32.25714285714286,
"alnum_prop": 0.6767050487156776,
"repo_name": "persandstrom/home-assistant",
"id": "49744421c726ec15ee328c44ca35b4cb811d4a7c",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/test_prometheus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
import os
import sys
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(dir, 'scripts'))
# testing
import mock
import unittest
from mock import patch
# program
import collect.load as Load
class CheckLoadCountryList(unittest.TestCase):
'''Unit tests for the loading of the country list.'''
#
# Check that load works.
#
def test_that_load_country_list_fails_gracefully(self):
assert Load.LoadCountryList('xxx') == False
assert Load.LoadCountryList() != False
#
# Testing object types.
#
def test_load_country_returns_array(self):
d = Load.LoadCountryList()
assert type(d) is list
def test_country_list_contains_the_right_schema(self):
d = Load.LoadCountryList()
assert d[0].get('iso', None) != None
assert d[0].get('name', None) != None
| {
"content_hash": "bbd48466e0fee837eb8b9b04e8e528b2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 23.914285714285715,
"alnum_prop": 0.6905615292712067,
"repo_name": "luiscape/hdxscraper-reach-resource-center",
"id": "222f6209dd93ec8639436ec160fe494fd44c0c9d",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Python",
"bytes": "4638"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import collections, itertools
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews, stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
def evaluate_classifier(featx):
negids = movie_reviews.fileids('neg')
posids = movie_reviews.fileids('pos')
negfeats = [(featx(movie_reviews.words(fileids=[f])), 'neg') for f in negids]
posfeats = [(featx(movie_reviews.words(fileids=[f])), 'pos') for f in posids]
negcutoff = len(negfeats)*3/4
poscutoff = len(posfeats)*3/4
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
classifier = NaiveBayesClassifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed = classifier.classify(feats)
testsets[observed].add(i)
print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
classifier.show_most_informative_features()
def word_feats(words):
return dict([(word, True) for word in words])
print 'evaluating single word features'
evaluate_classifier(word_feats)
word_fd = FreqDist()
label_word_fd = ConditionalFreqDist()
for word in movie_reviews.words(categories=['pos']):
word_fd.inc(word.lower())
label_word_fd['pos'].inc(word.lower())
for word in movie_reviews.words(categories=['neg']):
word_fd.inc(word.lower())
label_word_fd['neg'].inc(word.lower())
# n_ii = label_word_fd[label][word]
# n_ix = word_fd[word]
# n_xi = label_word_fd[label].N()
# n_xx = label_word_fd.N()
pos_word_count = label_word_fd['pos'].N()
neg_word_count = label_word_fd['neg'].N()
total_word_count = pos_word_count + neg_word_count
word_scores = {}
for word, freq in word_fd.iteritems():
pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],
(freq, pos_word_count), total_word_count)
neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
(freq, neg_word_count), total_word_count)
word_scores[word] = pos_score + neg_score
best = sorted(word_scores.iteritems(), key=lambda (w,s): s, reverse=True)[:10000]
bestwords = set([w for w, s in best])
def best_word_feats(words):
return dict([(word, True) for word in words if word in bestwords])
print 'evaluating best word features'
evaluate_classifier(best_word_feats)
def best_bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
d = dict([(bigram, True) for bigram in bigrams])
d.update(best_word_feats(words))
return d
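# Illustrative shape of the features this produces for a short review (the
# particular words and bigrams are hypothetical; which ones appear depends on
# the chi_sq scores computed above):
#   {('not', 'good'): True, ('at', 'all'): True, 'good': True, ...}
# i.e. the 200 highest-scoring bigrams of the document plus any unigrams that
# survived into `bestwords`.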
print 'evaluating best words + bigram chi_sq word features'
evaluate_classifier(best_bigram_word_feats)
| {
"content_hash": "73c93d700a2b9f560d9fcade12970fe6",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 81,
"avg_line_length": 35.32222222222222,
"alnum_prop": 0.7363950927964769,
"repo_name": "Feawel/MachineLearningProject",
"id": "58bb844c8ec7992accf62f6aa235eede0a537b9e",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_good.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24840"
}
],
"symlink_target": ""
} |
from setuptools import setup
import stackato
requirements = ['setuptools', 'requests>=0.11.2']
version = '0.10.0.1dev'
setup(
name='PyStackato',
version=version,
description=stackato.__doc__.strip(),
author=stackato.__author__,
license=stackato.__license__,
packages=['stackato'],
install_requires=requirements
)
| {
"content_hash": "906d9ad8eb5f58a9fe38f6b3af8b9464",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 19.22222222222222,
"alnum_prop": 0.6734104046242775,
"repo_name": "noderabbit-team/PyStackato",
"id": "232c03c897e8a0d557cdab6fa0ee06790211e0fb",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "11702"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.
import argparse
import doctest
import re
import sys
FALSE_POSITIVES = [
("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
("src/index/base.cpp", "FatalError(const char *fmt, const Args &...args)"),
("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char *fmt, const Args &...args)"),
("src/util/system.cpp",
"strprintf(_(COPYRIGHT_HOLDERS).translated, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
("src/validationinterface.cpp",
"LogPrint(BCLog::VALIDATION, fmt \"\\n\", __VA_ARGS__)"),
("src/tinyformat.h", "printf(const char *fmt, const Args &...args)"),
("src/tinyformat.h", "printf(const char *fmt, TINYFORMAT_VARARGS(n))"),
("src/wallet/wallet.h",
"LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
("src/wallet/scriptpubkeyman.h",
"WalletLogPrintf(std::string fmt, Params... parameters)"),
("src/wallet/scriptpubkeyman.h",
"LogPrintf((\"%s \" + fmt).c_str(), m_storage.GetDisplayName(), parameters...)"),
("src/wallet/scriptpubkeyman.h",
"WalletLogPrintf(const std::string& fmt, const Params&... parameters)"),
]
FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
("FatalError", 0),
("fprintf", 1),
("LogConnectFailure", 1),
("LogPrint", 1),
("LogPrintf", 0),
("printf", 0),
("snprintf", 2),
("sprintf", 1),
("strprintf", 0),
("tfm::format", 1), # Assuming tfm::::format(std::ostream&, ...
("vfprintf", 1),
("vprintf", 1),
("vsnprintf", 1),
("vsprintf", 1),
]
def parse_function_calls(function_name, source_code):
"""Return an array with all calls to function function_name in string source_code.
Preprocessor directives and C++ style comments ("//") in source_code are removed.
>>> len(parse_function_calls("foo", "foo();bar();foo();bar();"))
2
>>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);")
True
>>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);")
True
>>> len(parse_function_calls("foo", "foo();bar();// foo();bar();"))
1
>>> len(parse_function_calls("foo", "#define FOO foo();"))
0
"""
assert isinstance(function_name, str) and isinstance(
source_code, str) and function_name
lines = [re.sub("// .*", " ", line).strip()
for line in source_code.split("\n")
if not line.strip().startswith("#")]
return re.findall(
r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines))
def normalize(s):
"""Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */")
replaced with spaces. Multiple spaces are replaced with a single space.
>>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ")
'foo foo foo'
"""
assert isinstance(s, str)
s = s.replace("\n", " ")
s = s.replace("\t", " ")
s = re.sub(r"/\*.*?\*/", " ", s)
s = re.sub(" {2,}", " ", s)
return s.strip()
ESCAPE_MAP = {
r"\n": "[escaped-newline]",
r"\t": "[escaped-tab]",
r'\"': "[escaped-quote]",
}
def escape(s):
"""Return the escaped version of string s with "\\\"", "\\n" and "\\t" escaped as
"[escaped-backslash]", "[escaped-newline]" and "[escaped-tab]".
>>> unescape(escape("foo")) == "foo"
True
>>> escape(r'foo \\t foo \\n foo \\\\ foo \\ foo \\"bar\\"')
'foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]'
"""
assert isinstance(s, str)
for raw_value, escaped_value in ESCAPE_MAP.items():
s = s.replace(raw_value, escaped_value)
return s
def unescape(s):
"""Return the unescaped version of escaped string s.
Reverses the replacements made in function escape(s).
>>> unescape(escape("bar"))
'bar'
>>> unescape("foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]")
'foo \\\\t foo \\\\n foo \\\\\\\\ foo \\\\ foo \\\\"bar\\\\"'
"""
assert isinstance(s, str)
for raw_value, escaped_value in ESCAPE_MAP.items():
s = s.replace(escaped_value, raw_value)
return s
def parse_function_call_and_arguments(function_name, function_call):
"""Split string function_call into an array of strings consisting of:
* the string function_call followed by "("
* the function call argument #1
* ...
* the function call argument #n
* a trailing ");"
The strings returned are in escaped form. See escape(...).
>>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
['foo(', '"%s",', ' "foo"', ')']
>>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
['foo(', '"%s",', ' "foo"', ')']
>>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");')
['foo(', '"%s %s",', ' "foo",', ' "bar"', ')']
>>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);')
['fooprintf(', '"%050d",', ' i', ')']
>>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo')
['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')']
>>> parse_function_call_and_arguments("foo", "foo()")
['foo(', '', ')']
>>> parse_function_call_and_arguments("foo", "foo(123)")
['foo(', '123', ')']
>>> parse_function_call_and_arguments("foo", 'foo("foo")')
['foo(', '"foo"', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<wchar_t>().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' foo<wchar_t>().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo().to_bytes(buf), err);')
['strprintf(', '"%s (%d)",', ' foo().to_bytes(buf),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo << 1, err);')
['strprintf(', '"%s (%d)",', ' foo << 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<bar>() >> 1, err);')
['strprintf(', '"%s (%d)",', ' foo<bar>() >> 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1 ? bar : foobar, err);')
['strprintf(', '"%s (%d)",', ' foo < 1 ? bar : foobar,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1, err);')
['strprintf(', '"%s (%d)",', ' foo < 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1 ? bar : foobar, err);')
['strprintf(', '"%s (%d)",', ' foo > 1 ? bar : foobar,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1, err);')
['strprintf(', '"%s (%d)",', ' foo > 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= 1, err);')
['strprintf(', '"%s (%d)",', ' foo <= 1,', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= bar<1, 2>(1, 2), err);')
['strprintf(', '"%s (%d)",', ' foo <= bar<1, 2>(1, 2),', ' err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2)?bar:foobar,err)');
['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2)?bar:foobar,', 'err', ')']
>>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2),err)');
['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2),', 'err', ')']
"""
assert isinstance(function_name, str) and isinstance(
function_call, str) and function_name
remaining = normalize(escape(function_call))
expected_function_call = "{}(".format(function_name)
assert remaining.startswith(expected_function_call)
parts = [expected_function_call]
remaining = remaining[len(expected_function_call):]
open_parentheses = 1
open_template_arguments = 0
in_string = False
parts.append("")
for i, char in enumerate(remaining):
parts.append(parts.pop() + char)
if char == "\"":
in_string = not in_string
continue
if in_string:
continue
if char == "(":
open_parentheses += 1
continue
if char == ")":
open_parentheses -= 1
if open_parentheses > 1:
continue
if open_parentheses == 0:
parts.append(parts.pop()[:-1])
parts.append(char)
break
prev_char = remaining[i - 1] if i - 1 >= 0 else None
next_char = remaining[i + 1] if i + 1 <= len(remaining) - 1 else None
if (char == "<" and next_char not in [" ", "<", "="]
and prev_char not in [" ", "<"]):
open_template_arguments += 1
continue
if (char == ">" and next_char not in [" ", ">", "="] and
prev_char not in [" ", ">"] and open_template_arguments > 0):
open_template_arguments -= 1
if open_template_arguments > 0:
continue
if char == ",":
parts.append("")
return parts
def parse_string_content(argument):
"""Return the text within quotes in string argument.
>>> parse_string_content('1 "foo %d bar" 2')
'foo %d bar'
>>> parse_string_content('1 foobar 2')
''
>>> parse_string_content('1 "bar" 2')
'bar'
>>> parse_string_content('1 "foo" 2 "bar" 3')
'foobar'
>>> parse_string_content('1 "foo" 2 " " "bar" 3')
'foo bar'
>>> parse_string_content('""')
''
>>> parse_string_content('')
''
>>> parse_string_content('1 2 3')
''
"""
assert isinstance(argument, str)
string_content = ""
in_string = False
for char in normalize(escape(argument)):
if char == "\"":
in_string = not in_string
elif in_string:
string_content += char
return string_content
def count_format_specifiers(format_string):
"""Return the number of format specifiers in string format_string.
>>> count_format_specifiers("foo bar foo")
0
>>> count_format_specifiers("foo %d bar foo")
1
>>> count_format_specifiers("foo %d bar %i foo")
2
>>> count_format_specifiers("foo %d bar %i foo %% foo")
2
>>> count_format_specifiers("foo %d bar %i foo %% foo %d foo")
3
>>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
4
"""
assert isinstance(format_string, str)
n = 0
in_specifier = False
for i, char in enumerate(format_string):
if format_string[i - 1:i +
1] == "%%" or format_string[i:i + 2] == "%%":
pass
elif char == "%":
in_specifier = True
n += 1
elif char in "aAcdeEfFgGinopsuxX":
in_specifier = False
elif in_specifier and char == "*":
n += 1
return n
def main(args_in):
""" Return a string output with information on string format errors
>>> main(["test/lint/lint-format-strings-tests.txt"])
test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 2 argument(s): printf("%d", 1, 2)
test/lint/lint-format-strings-tests.txt: Expected 2 argument(s) after format string but found 3 argument(s): printf("%a %b", 1, 2, "anything")
test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 0 argument(s): printf("%d")
test/lint/lint-format-strings-tests.txt: Expected 3 argument(s) after format string but found 2 argument(s): printf("%a%b%z", 1, "anything")
>>> main(["test/lint/lint-format-strings-tests-skip-arguments.txt"])
test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): fprintf(skipped, "%d", 1, 2)
test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): fprintf(skipped, "%d")
test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): snprintf(skip1, skip2, "%d", 1, 2)
test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): snprintf(skip1, skip2, "%d")
test/lint/lint-format-strings-tests-skip-arguments.txt: Could not parse function call string "snprintf(...)": snprintf(skip1, "%d")
"""
parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed "
"to a variadic format string function matches the number of format "
"specifiers in the format string.")
parser.add_argument("file", type=argparse.FileType(
"r", encoding="utf-8"), nargs="*", help="C++ source code file (e.g. foo.cpp)")
args = parser.parse_args(args_in)
for f in args.file:
file_content = f.read()
for (function_name,
skip_arguments) in FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS:
for function_call_str in parse_function_calls(
function_name, file_content):
parts = parse_function_call_and_arguments(
function_name, function_call_str)
relevant_function_call_str = unescape("".join(parts))[:512]
if (f.name, relevant_function_call_str) in FALSE_POSITIVES:
continue
if len(parts) < 3 + skip_arguments:
print("{}: Could not parse function call string \"{}(...)\": {}".format(
f.name, function_name, relevant_function_call_str))
continue
argument_count = len(parts) - 3 - skip_arguments
format_str = parse_string_content(parts[1 + skip_arguments])
format_specifier_count = count_format_specifiers(format_str)
if format_specifier_count != argument_count:
print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format(
f.name, format_specifier_count, argument_count, relevant_function_call_str))
continue
if __name__ == "__main__":
doctest.testmod()
main(sys.argv[1:])
| {
"content_hash": "89686e08d214e114a0aaf8863effcea3",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 164,
"avg_line_length": 44.16422287390029,
"alnum_prop": 0.5710491367861886,
"repo_name": "Bitcoin-ABC/bitcoin-abc",
"id": "a0919d7fd6540ec2149948c834926b5fb24aa278",
"size": "15060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/lint/lint-format-strings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1160721"
},
{
"name": "C++",
"bytes": "9817660"
},
{
"name": "CMake",
"bytes": "195193"
},
{
"name": "CSS",
"bytes": "4284"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "HTML",
"bytes": "25754"
},
{
"name": "Java",
"bytes": "41238"
},
{
"name": "JavaScript",
"bytes": "2366459"
},
{
"name": "Kotlin",
"bytes": "3712"
},
{
"name": "M4",
"bytes": "31132"
},
{
"name": "Makefile",
"bytes": "100617"
},
{
"name": "Objective-C++",
"bytes": "5811"
},
{
"name": "PHP",
"bytes": "94504"
},
{
"name": "Perl",
"bytes": "4551"
},
{
"name": "PowerShell",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "2706993"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Ruby",
"bytes": "21108"
},
{
"name": "Rust",
"bytes": "54953"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "167526"
},
{
"name": "TypeScript",
"bytes": "66320"
}
],
"symlink_target": ""
} |
import re
import web
import simplejson as json
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.access.machine import findbyhost1
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_CHAR, CHECK_MIN, CHECK_MAX, CHECK_ONLYSPACE, \
CHECK_UNIQUE
from karesansui.lib.utils import is_param, is_empty, preprint_r, \
base64_encode, get_ifconfig_info
from karesansui.lib.networkaddress import NetworkAddress
from karesansui.lib.parser.staticroute import staticrouteParser as Parser
from karesansui.lib.conf import read_conf, write_conf
def validates_staticroute(obj):
checker = Checker()
check = True
_ = obj._
checker.errors = []
if not is_param(obj.input, 'target'):
check = False
checker.add_error(_('Specify target address for the route.'))
else:
check = checker.check_ipaddr(
_('Target'),
obj.input.target,
CHECK_EMPTY | CHECK_VALID,
) and check
if not is_param(obj.input, 'gateway'):
check = False
checker.add_error(_('Specify gateway address for the route.'))
else:
check = checker.check_ipaddr(
_('Gateway'),
obj.input.gateway,
CHECK_VALID,
) and check
obj.view.alert = checker.errors
return check
class HostBy1StaticRoute(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
self.view.host_id = host_id
# unremovable entries
excludes = {
"device": ["^peth","^virbr","^sit","^xenbr","^lo","^br"],
"ipaddr": ["^0\.0\.0\.0$", "^169\.254\.0\.0$"],
}
devices = []
phydev_regex = re.compile(r"^eth[0-9]+")
for dev,dev_info in get_ifconfig_info().iteritems():
if phydev_regex.match(dev):
try:
if dev_info['ipaddr'] is not None:
devices.append(dev)
net = NetworkAddress("%s/%s" % (dev_info['ipaddr'],dev_info['mask'],))
excludes['ipaddr'].append(net.network)
except:
pass
self.view.devices = devices
parser = Parser()
status = parser.do_status()
routes = {}
for _k,_v in status.iteritems():
for _k2,_v2 in _v.iteritems():
name = base64_encode("%s@%s" % (_k2,_k,))
routes[name] = {}
routes[name]['name'] = name
routes[name]['device'] = _k
routes[name]['gateway'] = _v2['gateway']
routes[name]['flags'] = _v2['flags']
routes[name]['ref'] = _v2['ref']
routes[name]['use'] = _v2['use']
net = NetworkAddress(_k2)
routes[name]['ipaddr'] = net.ipaddr
routes[name]['netlen'] = net.netlen
routes[name]['netmask'] = net.netmask
removable = True
for _ex_key,_ex_val in excludes.iteritems():
ex_regex = "|".join(_ex_val)
mm = re.search(ex_regex,routes[name][_ex_key])
if mm:
removable = False
routes[name]['removable'] = removable
self.view.routes = routes
if self.is_mode_input():
pass
return True
@auth
def _POST(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
host = findbyhost1(self.orm, host_id)
if not validates_staticroute(self):
return web.badrequest(self.view.alert)
modules = ["staticroute"]
dop = read_conf(modules, self, host)
if dop is False:
return web.internalerror('Internal Server Error. (Timeout)')
target = self.input.target
net = NetworkAddress(target)
ipaddr = net.ipaddr
netmask = net.netmask
netlen = net.netlen
network = net.network
target = "%s/%s" % (ipaddr,netlen,)
gateway = self.input.gateway
device = self.input.device
dop.set("staticroute", [device,target], gateway)
from karesansui.lib.parser.staticroute import PARSER_COMMAND_ROUTE
        if net.netlen == 32:
            # host route: include the gateway only if one was supplied
            # (validation above allows an empty gateway)
            if gateway:
                command = "%s add -host %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,gateway,device,)
            else:
                command = "%s add -host %s dev %s" % (PARSER_COMMAND_ROUTE,ipaddr,device,)
else:
command = "%s add -net %s netmask %s gw %s dev %s" % (PARSER_COMMAND_ROUTE,network,netmask,gateway,device,)
extra_args = {"post-command": command}
retval = write_conf(dop, self, host, extra_args=extra_args)
if retval is False:
return web.internalerror('Internal Server Error. (Adding Task)')
return web.accepted(url=web.ctx.path)
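# Illustrative walk-through of _POST above with hypothetical input values:
#   target = "192.168.10.0/24", gateway = "10.0.0.1", device = "eth0"
# stores dop.set("staticroute", ["eth0", "192.168.10.0/24"], "10.0.0.1") and
# queues a post-command of the form
#   <route> add -net 192.168.10.0 netmask 255.255.255.0 gw 10.0.0.1 dev eth0
# where <route> stands for whatever PARSER_COMMAND_ROUTE is configured to be.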
urls = (
'/host/(\d+)/staticroute[/]?(\.html|\.part|\.json)?$', HostBy1StaticRoute,
)
| {
"content_hash": "8a331ca873932e857c1781691fbfbd39",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 119,
"avg_line_length": 32.46913580246913,
"alnum_prop": 0.5406844106463878,
"repo_name": "karesansui/karesansui",
"id": "63c8195c0d487a4dcedd4bbcb1b4d8750447df41",
"size": "6419",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "karesansui/gadget/hostby1staticroute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79865"
},
{
"name": "HTML",
"bytes": "32774"
},
{
"name": "JavaScript",
"bytes": "286445"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "2226164"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from itertools import izip_longest
import pytz
from google.appengine.ext import ndb
TIMESPAN_LAST_WEEK = 'last_week'
TIMESPAN_THIS_WEEK = 'this_week'
def chunk(iterable, chunk_size):
"""Collect data into fixed-length chunks or blocks (http://docs.python.org/2/library/itertools.html#recipes)"""
args = [iter(iterable)] * chunk_size
return izip_longest(*args)
def to_the_future(dict):
for k, v in dict.iteritems():
if issubclass(v.__class__, ndb.Future):
dict[k] = v.get_result()
def utc_week_limits(utc_dt):
"""Returns US/Pacific start (12:00 am Sunday) and end (11:59 pm Saturday) of the week containing utc_dt, in UTC."""
local_now = utc_dt.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Pacific'))
local_week_start = local_now - timedelta(
days=local_now.weekday() + 1,
hours=local_now.hour,
minutes=local_now.minute,
seconds=local_now.second,
microseconds=local_now.microsecond,
)
local_week_end = local_week_start + timedelta(days=7, minutes=-1)
utc_week_start = local_week_start.astimezone(pytz.utc).replace(tzinfo=None)
utc_week_end = local_week_end.astimezone(pytz.utc).replace(tzinfo=None)
return (utc_week_start, utc_week_end)
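# Example (illustrative): for a mid-week UTC timestamp the returned bounds are
# the enclosing US/Pacific Sunday-to-Saturday week, expressed back as naive UTC.
#   >>> from datetime import datetime
#   >>> utc_week_limits(datetime(2016, 6, 15, 12, 0))   # a Wednesday; PDT = UTC-7
#   (datetime.datetime(2016, 6, 12, 7, 0), datetime.datetime(2016, 6, 19, 6, 59))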
| {
"content_hash": "7ab6429b22d437b4de069f0da6af191e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 119,
"avg_line_length": 32.375,
"alnum_prop": 0.6810810810810811,
"repo_name": "Yelp/love",
"id": "6672ddfa5e1d349ffdabd8edd4853dc3612bfc70",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logic/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5902"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "118945"
}
],
"symlink_target": ""
} |
"""Manifest validation."""
from __future__ import annotations
from pathlib import Path
from urllib.parse import urlparse
from awesomeversion import (
AwesomeVersion,
AwesomeVersionException,
AwesomeVersionStrategy,
)
import voluptuous as vol
from voluptuous.humanize import humanize_error
from .model import Config, Integration
DOCUMENTATION_URL_SCHEMA = "https"
DOCUMENTATION_URL_HOST = "www.home-assistant.io"
DOCUMENTATION_URL_PATH_PREFIX = "/integrations/"
DOCUMENTATION_URL_EXCEPTIONS = {"https://www.home-assistant.io/hassio"}
SUPPORTED_QUALITY_SCALES = ["gold", "internal", "platinum", "silver"]
SUPPORTED_IOT_CLASSES = [
"assumed_state",
"calculated",
"cloud_polling",
"cloud_push",
"local_polling",
"local_push",
]
# List of integrations that are supposed to have no IoT class
NO_IOT_CLASS = [
"air_quality",
"alarm_control_panel",
"api",
"auth",
"automation",
"binary_sensor",
"blueprint",
"calendar",
"camera",
"climate",
"color_extractor",
"config",
"configurator",
"counter",
"cover",
"default_config",
"device_automation",
"device_tracker",
"discovery",
"downloader",
"fan",
"ffmpeg",
"frontend",
"geo_location",
"history",
"homeassistant",
"humidifier",
"image_processing",
"image",
"input_boolean",
"input_datetime",
"input_number",
"input_select",
"input_text",
"intent_script",
"intent",
"light",
"lock",
"logbook",
"logger",
"lovelace",
"mailbox",
"map",
"media_player",
"media_source",
"my",
"notify",
"number",
"onboarding",
"panel_custom",
"panel_iframe",
"plant",
"profiler",
"proxy",
"python_script",
"remote",
"safe_mode",
"scene",
"script",
"search",
"select",
"sensor",
"siren",
"stt",
"switch",
"system_health",
"system_log",
"tag",
"timer",
"trace",
"tts",
"vacuum",
"water_heater",
"weather",
"webhook",
"websocket_api",
"zone",
]
def documentation_url(value: str) -> str:
"""Validate that a documentation url has the correct path and domain."""
if value in DOCUMENTATION_URL_EXCEPTIONS:
return value
parsed_url = urlparse(value)
if parsed_url.scheme != DOCUMENTATION_URL_SCHEMA:
raise vol.Invalid("Documentation url is not prefixed with https")
if parsed_url.netloc == DOCUMENTATION_URL_HOST and not parsed_url.path.startswith(
DOCUMENTATION_URL_PATH_PREFIX
):
raise vol.Invalid(
"Documentation url does not begin with www.home-assistant.io/integrations"
)
return value
def verify_lowercase(value: str):
"""Verify a value is lowercase."""
if value.lower() != value:
raise vol.Invalid("Value needs to be lowercase")
return value
def verify_uppercase(value: str):
"""Verify a value is uppercase."""
if value.upper() != value:
raise vol.Invalid("Value needs to be uppercase")
return value
def verify_version(value: str):
"""Verify the version."""
try:
AwesomeVersion(
value,
[
AwesomeVersionStrategy.CALVER,
AwesomeVersionStrategy.SEMVER,
AwesomeVersionStrategy.SIMPLEVER,
AwesomeVersionStrategy.BUILDVER,
AwesomeVersionStrategy.PEP440,
],
)
except AwesomeVersionException:
raise vol.Invalid(f"'{value}' is not a valid version.")
return value
def verify_wildcard(value: str):
"""Verify the matcher contains a wildcard."""
if "*" not in value:
raise vol.Invalid(f"'{value}' needs to contain a wildcard matcher")
return value
MANIFEST_SCHEMA = vol.Schema(
{
vol.Required("domain"): str,
vol.Required("name"): str,
vol.Optional("config_flow"): bool,
vol.Optional("mqtt"): [str],
vol.Optional("zeroconf"): [
vol.Any(
str,
vol.Schema(
{
vol.Required("type"): str,
vol.Optional("macaddress"): vol.All(
str, verify_uppercase, verify_wildcard
),
vol.Optional("manufacturer"): vol.All(str, verify_lowercase),
vol.Optional("name"): vol.All(str, verify_lowercase),
}
),
)
],
vol.Optional("ssdp"): vol.Schema(
vol.All([vol.All(vol.Schema({}, extra=vol.ALLOW_EXTRA), vol.Length(min=1))])
),
vol.Optional("homekit"): vol.Schema({vol.Optional("models"): [str]}),
vol.Optional("dhcp"): [
vol.Schema(
{
vol.Optional("macaddress"): vol.All(
str, verify_uppercase, verify_wildcard
),
vol.Optional("hostname"): vol.All(str, verify_lowercase),
}
)
],
vol.Optional("usb"): [
vol.Schema(
{
vol.Optional("vid"): vol.All(str, verify_uppercase),
vol.Optional("pid"): vol.All(str, verify_uppercase),
vol.Optional("known_devices"): [str],
}
)
],
vol.Required("documentation"): vol.All(
vol.Url(), documentation_url # pylint: disable=no-value-for-parameter
),
vol.Optional(
"issue_tracker"
): vol.Url(), # pylint: disable=no-value-for-parameter
vol.Optional("quality_scale"): vol.In(SUPPORTED_QUALITY_SCALES),
vol.Optional("requirements"): [str],
vol.Optional("dependencies"): [str],
vol.Optional("after_dependencies"): [str],
vol.Required("codeowners"): [str],
vol.Optional("disabled"): str,
vol.Optional("iot_class"): vol.In(SUPPORTED_IOT_CLASSES),
}
)
CUSTOM_INTEGRATION_MANIFEST_SCHEMA = MANIFEST_SCHEMA.extend(
{
vol.Optional("version"): vol.All(str, verify_version),
}
)
def validate_version(integration: Integration):
"""
Validate the version of the integration.
Will be removed when the version key is no longer optional for custom integrations.
"""
if not integration.manifest.get("version"):
integration.add_error("manifest", "No 'version' key in the manifest file.")
return
def validate_manifest(integration: Integration, core_components_dir: Path) -> None:
"""Validate manifest."""
if not integration.manifest:
return
try:
if integration.core:
MANIFEST_SCHEMA(integration.manifest)
else:
CUSTOM_INTEGRATION_MANIFEST_SCHEMA(integration.manifest)
except vol.Invalid as err:
integration.add_error(
"manifest", f"Invalid manifest: {humanize_error(integration.manifest, err)}"
)
if integration.manifest["domain"] != integration.path.name:
integration.add_error("manifest", "Domain does not match dir name")
if (
not integration.core
and (core_components_dir / integration.manifest["domain"]).exists()
):
integration.add_warning(
"manifest", "Domain collides with built-in core integration"
)
if (
integration.manifest["domain"] in NO_IOT_CLASS
and "iot_class" in integration.manifest
):
integration.add_error("manifest", "Domain should not have an IoT Class")
if (
integration.manifest["domain"] not in NO_IOT_CLASS
and "iot_class" not in integration.manifest
):
integration.add_error("manifest", "Domain is missing an IoT Class")
if not integration.core:
validate_version(integration)
def validate(integrations: dict[str, Integration], config: Config) -> None:
"""Handle all integrations manifests."""
core_components_dir = config.root / "homeassistant/components"
for integration in integrations.values():
validate_manifest(integration, core_components_dir)
| {
"content_hash": "ed18f204a1349b7c3528a580dd09ecb7",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 88,
"avg_line_length": 27.60942760942761,
"alnum_prop": 0.5808536585365853,
"repo_name": "sander76/home-assistant",
"id": "8c9776ed7c9e06007aefe003f6be9184220456f9",
"size": "8200",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "script/hassfest/manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
Solve day 15 of Advent of Code.
http://adventofcode.com/2016/day/15
"""
import re
from collections import namedtuple
Disc = namedtuple('Disc', ['name', 'num_pos', 'start_pos'])
def discs_from_input(data):
discs = []
for line in data:
match = re.match(r'.+ #(?P<name>\d+) .+ (?P<num_pos>\d+) positions; '
'.+ position (?P<start_pos>\d+).', line)
discs.append(Disc(
name=int(match.group('name')),
num_pos=int(match.group('num_pos')),
start_pos=int(match.group('start_pos'))))
return sorted(discs)
def pos_at_time(disc, t):
return (t + disc.start_pos) % disc.num_pos
def find_goal_state(discs):
return [-disc.name % disc.num_pos for disc in discs]
def find_time_to_push(discs):
goal = find_goal_state(discs)
t = 0
while True:
positions = [pos_at_time(disc, t) for disc in discs]
if positions == goal:
break
t += 1
return t
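# Worked example (the two-disc machine from the puzzle statement): Disc #1 has
# 5 positions and starts at 4, Disc #2 has 2 positions and starts at 1.
#   >>> discs = [Disc(name=1, num_pos=5, start_pos=4), Disc(name=2, num_pos=2, start_pos=1)]
#   >>> find_goal_state(discs)   # where each disc must sit when the capsule reaches it
#   [4, 0]
#   >>> find_time_to_push(discs)
#   5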
if __name__ == '__main__':
with open('input.txt') as f:
discs = discs_from_input(f)
print("Part 1:", find_time_to_push(discs))
discs.append(Disc(
name=discs[-1].name+1,
num_pos=11,
start_pos=0))
print("Part 2:", find_time_to_push(discs))
| {
"content_hash": "a0594a059318fdc0c3b08db503b5ec19",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 23.89090909090909,
"alnum_prop": 0.541095890410959,
"repo_name": "mpirnat/aoc2016",
"id": "11e6766c9bbc613667a17052d7835ee09a65932d",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day15/day15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70457"
}
],
"symlink_target": ""
} |
"""
eve.settings
~~~~~~~~~~~~
Default API settings. These can be overridden by editing this file or, more
appropriately, by using a custom settings module (see the optional
'settings' argument or the EVE_SETTING environment variable).
:copyright: (c) 2016 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
.. versionchanged:: 0.7
'OPLOG_RETURN_EXTRA_FIELD' added and set to False.
    'ENFORCE_IF_MATCH' added and set to True.
.. versionchanged:: 0.6
    'UPSERT_ON_PUT' added and set to True.
'STANDARD_ERRORS' added.
'JSONP_ARGUMENT' added and set to None.
'HEADER_TOTAL_COUNT' added and set to 'X-Total-Count'.
'RETURN_MEDIA_AS_URL' added and set to None.
'MEDIA_ENDPOINT' added and set to 'media'.
'MEDIA_URL' added and set to regex("[a-f0-9]{24}").
'SOFT_DELETE' added and set to False.
'DELETED' added and set to '_deleted'.
'SHOW_DELETED_PARAM' added and set to 'show_deleted'.
'SCHEMA_ENDPOINT' added and set to None
.. versionchanged:: 0.5
'SERVER_NAME' removed.
'URL_PROTOCOL' removed.
'OPLOG' added and set to False.
'OPLOG_NAME' added and set to 'oplog'.
'OPLOG_METHODS' added and set to all edit operations.
'OPLOG_ENDPOINT' added and set to None.
'OPLOG_AUDIT' added and set to True.
'QUERY_WHERE' added and set to 'where'
'QUERY_PROJECTION' added and set to 'projection'
'QUERY_SORT' added and set to 'sort'
'QUERY_PAGE' added and set to 'page'
'QUERY_MAX_RESULTS' added and set to 'max_results'
'QUERY_EMBEDDED' added and set to 'embedded'
'INTERNAL_RESOURCE' added and set to False
.. versionchanged:: 0.4
'META' added and set to '_meta'.
'ERROR' added and set to '_error'.
'URL_PROTOCOL' added and set to ''.
'BANDWIDTH_SAVER' added and set to True.
'VERSION' added and set to '_version'.
'VERSIONS' added and set to '_versions'.
'VERSIONING' added and set to False.
'VERSION_PARAM' added and set to 'version'.
'LATEST_VERSION' added and set to '_latest_version'.
'VERSION_ID_SUFFIX' added and set to '_document'.
'VERSION_DIFF_INCLUDE' added and set to [].
.. versionchanged:: 0.3
X_MAX_AGE added and set to 21600.
.. versionchanged:: 0.2
IF_MATCH defaults to True.
'LINKS' defaults to '_links'.
'ITEMS' defaults to '_items'.
'STATUS' defaults to 'status'.
'ISSUES' defaults to 'issues'.
'regex' is now part of 'ITEM_URL' default string.
.. versionchanged:: 0.1.1
'SERVER_NAME' defaults to None.
.. versionchanged:: 0.1.0
'EMBEDDING' added and set to True.
'HATEOAS' added and set to True.
.. versionchanged:: 0.0.9
'FILTERS' boolean changed to 'ALLOWED_FILTERS' list.
'AUTH_USERNAME_FIELD' renamed to 'AUTH_FIELD', and default value set to
None.
    'DATE_FORMAT' now using GMT instead of UTC.
.. versionchanged:: 0.0.7
    'EXTRA_RESPONSE_FIELDS' added and set to an empty list.
.. versionchanged:: 0.0.6
'PROJECTION' added and set to True.
'ALLOW_UNKNOWN' added and set to False.
.. versionchanged:: 0.0.5
    'AUTH_USERNAME_FIELD' keyword added to support user-restricted resource
    access.
    'X_DOMAIN' keyword added to support Cross-Origin Resource Sharing (CORS).
"""
# DEBUG = True
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
STATUS_OK = "OK"
STATUS_ERR = "ERR"
LAST_UPDATED = '_updated'
DATE_CREATED = '_created'
ISSUES = '_issues'
STATUS = '_status'
ERROR = '_error'
ITEMS = '_items'
LINKS = '_links'
ETAG = '_etag'
VERSION = '_version' # field that stores the version number
DELETED = '_deleted' # field to store soft delete status
META = '_meta'
INFO = None
VALIDATION_ERROR_STATUS = 422
# return a single field validation error as a list (by default a single error
# is returned as a string, while multiple errors are returned as a list).
VALIDATION_ERROR_AS_LIST = False
# codes for which we want to return a standard response which includes
# a JSON body with the status, code, and description.
STANDARD_ERRORS = [400, 401, 404, 405, 406, 409, 410, 412, 422, 428]
# field returned on GET requests so we know if we have the latest copy even if
# we access a specific version
LATEST_VERSION = '_latest_version'
# appended to ID_FIELD, holds the original document id in parallel collection
VERSION_ID_SUFFIX = '_document'
VERSION_DIFF_INCLUDE = [] # always include these fields when diffing
API_VERSION = ''
URL_PREFIX = ''
ID_FIELD = '_id'
CACHE_CONTROL = ''
CACHE_EXPIRES = 0
ITEM_CACHE_CONTROL = ''
X_DOMAINS = None # CORS disabled by default.
X_HEADERS = None # CORS disabled by default.
X_EXPOSE_HEADERS = None # CORS disabled by default.
X_ALLOW_CREDENTIALS = None # CORS disabled by default.
X_MAX_AGE = 21600 # Access-Control-Max-Age when CORS is enabled
HATEOAS = True # HATEOAS enabled by default.
IF_MATCH = True # IF_MATCH (ETag match) enabled by default.
ENFORCE_IF_MATCH = True # ENFORCE_IF_MATCH enabled by default.
ALLOWED_FILTERS = ['*'] # filtering enabled by default
VALIDATE_FILTERS = False
SORTING = True # sorting enabled by default.
JSON_SORT_KEYS = False # json key sorting
EMBEDDING = True # embedding enabled by default
PROJECTION = True # projection enabled by default
PAGINATION = True # pagination enabled by default.
PAGINATION_LIMIT = 50
PAGINATION_DEFAULT = 25
VERSIONING = False # turn document versioning on or off.
VERSIONS = '_versions' # suffix for parallel collection w/old versions
VERSION_PARAM = 'version' # URL param for specific version of a document.
INTERNAL_RESOURCE = False # resources are public by default.
JSONP_ARGUMENT = None # JSONP disabled by default.
SOFT_DELETE = False # soft delete disabled by default.
SHOW_DELETED_PARAM = 'show_deleted'
BULK_ENABLED = True
OPLOG = False # oplog is disabled by default.
OPLOG_NAME = 'oplog' # default oplog resource name.
OPLOG_ENDPOINT = None # oplog endpoint is disabled by default.
OPLOG_AUDIT = True # oplog audit enabled by default.
OPLOG_METHODS = ['DELETE',
'POST',
'PATCH',
'PUT'] # oplog logs all operations by default.
OPLOG_CHANGE_METHODS = ['DELETE',
'PATCH',
'PUT'] # methods which write changes to the oplog
OPLOG_RETURN_EXTRA_FIELD = False # oplog does not return the 'extra' field.
RESOURCE_METHODS = ['GET']
ITEM_METHODS = ['GET']
PUBLIC_METHODS = []
ALLOWED_ROLES = []
ALLOWED_READ_ROLES = []
ALLOWED_WRITE_ROLES = []
PUBLIC_ITEM_METHODS = []
ALLOWED_ITEM_ROLES = []
ALLOWED_ITEM_READ_ROLES = []
ALLOWED_ITEM_WRITE_ROLES = []
# globally enables / disables HTTP method overriding
ALLOW_OVERRIDE_HTTP_METHOD = True
ITEM_LOOKUP = True
ITEM_LOOKUP_FIELD = ID_FIELD
ITEM_URL = 'regex("[a-f0-9]{24}")'
UPSERT_ON_PUT = True  # insert documents that do not yet exist on PUT.
# use a simple file response format by default
EXTENDED_MEDIA_INFO = []
RETURN_MEDIA_AS_BASE64_STRING = True
RETURN_MEDIA_AS_URL = False
MEDIA_ENDPOINT = 'media'
MEDIA_URL = 'regex("[a-f0-9]{24}")'
MEDIA_BASE_URL = None
MULTIPART_FORM_FIELDS_AS_JSON = False
SCHEMA_ENDPOINT = None
# list of extra fields to be included with every POST response. This list
# should not include the 'standard' fields (ID_FIELD, LAST_UPDATED,
# DATE_CREATED, and ETAG). Only relevant when bandwidth saving mode is on.
EXTRA_RESPONSE_FIELDS = []
BANDWIDTH_SAVER = True
# default query parameters
QUERY_WHERE = 'where'
QUERY_PROJECTION = 'projection'
QUERY_SORT = 'sort'
QUERY_PAGE = 'page'
QUERY_MAX_RESULTS = 'max_results'
QUERY_EMBEDDED = 'embedded'
QUERY_AGGREGATION = 'aggregate'
HEADER_TOTAL_COUNT = 'X-Total-Count'
# user-restricted resource access is disabled by default.
AUTH_FIELD = None
# don't allow unknown key/value pairs for POST/PATCH payloads.
ALLOW_UNKNOWN = False
# don't ignore unknown schema rules (raise SchemaError)
TRANSPARENT_SCHEMA_RULES = False
# Rate limits are disabled by default. Needs a running redis-server.
RATE_LIMIT_GET = None
RATE_LIMIT_POST = None
RATE_LIMIT_PATCH = None
RATE_LIMIT_DELETE = None
# MONGO defaults
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
# disallow Mongo's javascript queries as they might be vulnerable to injection
# attacks ('ReDoS' especially), are probably too complex for the average API
# end-user and finally can seriously impact overall performance.
MONGO_QUERY_BLACKLIST = ['$where', '$regex']
# Explicitly set default write_concern to 'safe' (do regular
# acknowledged writes). This is also the current PyMongo/Mongo default setting.
MONGO_WRITE_CONCERN = {'w': 1}
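# A hypothetical fragment from an application's own settings module: any name
# defined above can simply be re-assigned there, and Eve's per-resource DOMAIN
# entries narrow the global defaults further. Resource and field names below
# are illustrative only; the block is guarded so it never runs on import.
if __name__ == "__main__":
    RESOURCE_METHODS = ['GET', 'POST']           # also allow collection-level POST
    ITEM_METHODS = ['GET', 'PATCH', 'DELETE']    # allow item edits and deletes
    DOMAIN = {
        'people': {
            'schema': {
                'name': {'type': 'string', 'maxlength': 80, 'required': True},
            },
        },
    }
    print('%r %r %r' % (sorted(DOMAIN.keys()), RESOURCE_METHODS, ITEM_METHODS))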
| {
"content_hash": "67e3dabfac0eb90679a3ff0768b83c81",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 79,
"avg_line_length": 36.69635627530364,
"alnum_prop": 0.6577669902912622,
"repo_name": "mugurrus/eve",
"id": "1ac76ef771e78404abe12f613b531c2c50a04cd6",
"size": "9089",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "eve/default_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "775652"
}
],
"symlink_target": ""
} |
import argparse
class Context(object):
def __init__(self, parser=None):
self.__parser = parser
def abort(self, message):
if not self.__parser:
return
self.__parser.error(message)
class Command(object):
def __init__(self, args, func):
self.arguments = args
self.parser = None
self.name = func.__name__
self.doc = func.__doc__
self.func = func
def callback(self, args):
ctx = Context(self.parser)
if hasattr(self.func, "__pass_ctx__"):
self.func(ctx, args)
else:
self.func(args)
@classmethod
def command(cls, func):
if not hasattr(func, "__cmd_args__"):
func.__cmd_args__ = []
func.__cmd_args__.reverse()
return cls(func.__cmd_args__, func)
@classmethod
def pass_ctx(cls, func):
func.__pass_ctx__ = True
return func
@classmethod
def argument(cls, *args, **kwargs):
def deco(func):
if not hasattr(func, "__cmd_args__"):
func.__cmd_args__ = []
func.__cmd_args__.append((args, kwargs))
return func
return deco
def __call__(self):
self.parser = argparse.ArgumentParser()
for args, kwargs in self.arguments:
self.parser.add_argument(*args, **kwargs)
args = self.parser.parse_args()
self.callback(args)
class CommandParser(object):
def __init__(self, *args, **kwargs):
self.parser = argparse.ArgumentParser(*args, **kwargs)
self.subparser = self.parser.add_subparsers(title="Subcommands")
def add_command(self, command):
parser = self.subparser.add_parser(command.name, help=command.doc)
command.parser = parser
for args, kwargs in command.arguments:
parser.add_argument(*args, **kwargs)
parser.set_defaults(func=command.callback)
def run(self):
args = self.parser.parse_args()
args.func(args)
# convenient alias
command = Command.command
argument = Command.argument
pass_ctx = Command.pass_ctx
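# A minimal usage sketch (the `greet` command is hypothetical): stack @argument
# decorators under @command, register the resulting Command on a CommandParser,
# and run it. Guarded so the demo never executes when this module is imported.
if __name__ == "__main__":
    @command
    @argument("name", help="who to greet")
    @argument("--count", type=int, default=1, help="number of greetings")
    def greet(args):
        """Print a greeting."""
        for _ in range(args.count):
            print("hello, {0}".format(args.name))

    cli = CommandParser(description="command demo")
    cli.add_command(greet)
    cli.run()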
| {
"content_hash": "a3e3c74a922bb3a1f48728e36e30bd74",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 74,
"avg_line_length": 26.873417721518987,
"alnum_prop": 0.5713612812058408,
"repo_name": "eleme/ruskit",
"id": "1074d73e63e8184c113f6d35ccfc060126745345",
"size": "2123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruskit/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "97225"
}
],
"symlink_target": ""
} |
from flask import request, redirect, current_app
YEAR_IN_SECS = 31536000
class SSLify(object):
"""Secures your Flask App."""
def __init__(self, app=None, age=YEAR_IN_SECS, subdomains=False,
permanent=False, skips=None, includes=None):
self.app = app or current_app
self.hsts_age = age
self.app.config.setdefault('SSLIFY_SUBDOMAINS', False)
self.app.config.setdefault('SSLIFY_PERMANENT', False)
        self.app.config.setdefault('SSLIFY_SKIPS', None)
        self.app.config.setdefault('SSLIFY_INCLUDES', None)
self.hsts_include_subdomains = subdomains or self.app.config['SSLIFY_SUBDOMAINS']
self.permanent = permanent or self.app.config['SSLIFY_PERMANENT']
self.skip_list = skips or self.app.config['SSLIFY_SKIPS']
self.include_list = includes or self.app.config['SSLIFY_INCLUDES']
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Configures the configured Flask app to enforce SSL."""
app.before_request(self.redirect_to_ssl)
app.after_request(self.set_hsts_header)
@property
def hsts_header(self):
"""Returns the proper HSTS policy."""
hsts_policy = 'max-age={0}'.format(self.hsts_age)
if self.hsts_include_subdomains:
hsts_policy += '; includeSubDomains'
return hsts_policy
@property
def skip(self):
"""Checks the skip list."""
# Should we skip?
if self.skip_list and isinstance(self.skip_list, list):
for skip in self.skip_list:
if request.path.startswith('/{0}'.format(skip)):
return True
return False
# Custom addition. Requires url to be in includes to work.
@property
def include(self):
"""Checks the include list."""
# Should we include?
if self.include_list and isinstance(self.include_list, list):
for include in self.include_list:
if request.path.startswith('/{0}'.format(include)):
return True
return False
def redirect_to_ssl(self):
"""Redirect incoming requests to HTTPS."""
# Should we redirect?
criteria = [
request.is_secure,
current_app.debug,
request.headers.get('X-Forwarded-Proto', 'http') == 'https'
]
if not any(criteria) and not self.skip:
if self.include_list and isinstance(self.include_list, list):
if not self.include:
return
if request.url.startswith('http://'):
url = request.url.replace('http://', 'https://', 1)
code = 302
if self.permanent:
code = 301
r = redirect(url, code=code)
return r
def set_hsts_header(self, response):
"""Adds HSTS header to each response."""
# Should we add STS header?
if request.is_secure and not self.skip:
response.headers.setdefault('Strict-Transport-Security', self.hsts_header)
return response
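# A minimal, hypothetical wiring example: wrap a Flask app so plain-HTTP
# requests are redirected to HTTPS and an HSTS header is added to responses.
# The route and skip entry are illustrative; guarded so it never runs on import.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    sslify = SSLify(app, permanent=True, skips=['healthz'])  # '/healthz...' stays on HTTP

    @app.route('/')
    def index():
        return 'secure hello'

    app.run()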
| {
"content_hash": "44a46d73828f8314ecdbad4e5f6584f8",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 89,
"avg_line_length": 35.15909090909091,
"alnum_prop": 0.5824175824175825,
"repo_name": "David-OConnor/lakenheath",
"id": "606294842061d47e8e05b98e9cfa95fac9c3db9b",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_sslify2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135467"
},
{
"name": "HTML",
"bytes": "19255"
},
{
"name": "JavaScript",
"bytes": "7283"
},
{
"name": "Python",
"bytes": "34076"
}
],
"symlink_target": ""
} |
import json
import logging
import os
logger = logging.getLogger(__name__)
CONFIG_FILE_PATH_ENV_PROPERTY_NAME = "REPOUR_CONFIG_FILE_PATH"
_cached_configuration = None
async def get_configuration():
return get_configuration_sync()
def get_configuration_sync():
global _cached_configuration
def get_config_file_path():
config_file_path = os.environ.get(
CONFIG_FILE_PATH_ENV_PROPERTY_NAME,
os.path.dirname(os.path.realpath(__file__)) + "/default-config.json",
)
if not os.path.isfile(config_file_path):
raise Exception(
"Could not find configuration file '" + config_file_path + "'."
)
return config_file_path
def load_configuration(config_file_path):
f = None
try:
f = open(config_file_path, "r")
return json.load(f)
finally:
if f is not None:
f.close()
if _cached_configuration is None:
config_file_path = get_config_file_path()
_cached_configuration = load_configuration(config_file_path)
logger.info(
"Loaded configuration '"
+ str(_cached_configuration)
+ "' from '"
+ str(config_file_path)
+ "'."
)
return _cached_configuration
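# A small, hypothetical demonstration of the lookup order: point the
# environment variable at a config file (the path below is illustrative and
# must exist), then read the configuration; repeated calls return the cached
# dict. Guarded so it never runs on import.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    os.environ.setdefault(CONFIG_FILE_PATH_ENV_PROPERTY_NAME, "/etc/repour/config.json")
    print(get_configuration_sync())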
| {
"content_hash": "ec0bbe364a5a509bcbaa602e1dfb5298",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.5743243243243243,
"repo_name": "project-ncl/repour",
"id": "02b32fff6553e2d3cd0e674c4c4366501c66f92b",
"size": "1347",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "repour/config/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "230290"
},
{
"name": "Shell",
"bytes": "8050"
}
],
"symlink_target": ""
} |
import random
from twisted.internet import task
def f():
return "Hopefully this will be called in 3 seconds or less"
def main(reactor):
delay = random.uniform(1, 5)
def called(result):
print("{0} seconds later:".format(delay), result)
d = task.deferLater(reactor, delay, f)
d.addTimeout(3, reactor).addBoth(called)
return d
# f() will be timed out if the random delay is greater than 3 seconds
task.react(main)
| {
"content_hash": "62651a9e70139f15715c96856d3bf009",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 23.63157894736842,
"alnum_prop": 0.6859688195991092,
"repo_name": "qrsforever/workspace",
"id": "2122871a660370e2d51b118bb7273acce5e93504",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/learn/twisted/timeouts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "208"
},
{
"name": "C",
"bytes": "591303"
},
{
"name": "C++",
"bytes": "98511"
},
{
"name": "CLIPS",
"bytes": "52178"
},
{
"name": "HTML",
"bytes": "1780"
},
{
"name": "HiveQL",
"bytes": "13"
},
{
"name": "Java",
"bytes": "381448"
},
{
"name": "Jupyter Notebook",
"bytes": "3148168"
},
{
"name": "Makefile",
"bytes": "108609"
},
{
"name": "Python",
"bytes": "991124"
},
{
"name": "R",
"bytes": "22072"
},
{
"name": "Ruby",
"bytes": "7046"
},
{
"name": "Shell",
"bytes": "119856"
},
{
"name": "TSQL",
"bytes": "5817"
}
],
"symlink_target": ""
} |
"""
Django settings for website project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o&w8u^%r5_knrl075m@qgzdf5o7951%sj^z2=+6na0x6*nos!c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'collisions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'vs_collisions',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
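# A hypothetical illustration of the usual production hardening for the
# development defaults above (placeholder values; guarded so it never runs
# when Django imports this settings module):
if __name__ == "__main__":
    production_overrides = {
        'DEBUG': False,
        'ALLOWED_HOSTS': ['collisions.example.org'],
        'SECRET_KEY': os.environ.get('DJANGO_SECRET_KEY', ''),
    }
    for name, value in sorted(production_overrides.items()):
        print('{0} = {1!r}'.format(name, value))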
| {
"content_hash": "3bc18a1dfad92d01802ab555144675f7",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 71,
"avg_line_length": 24.470588235294116,
"alnum_prop": 0.7254807692307692,
"repo_name": "paulvstheworld/vs_collisions",
"id": "23c30959ae601f157502def8b83f2a1916dfd2dc",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/website/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10053"
}
],
"symlink_target": ""
} |
"""
Classify an image using individual model files
Use this script as an example to build your own tool
"""
import argparse
import os
import time
from google.protobuf import text_format
import numpy as np
import PIL.Image
import scipy.misc
os.environ['GLOG_minloglevel'] = '2' # Suppress most caffe output
import caffe
from caffe.proto import caffe_pb2
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- if True, use the GPU for inference
"""
if use_gpu:
caffe.set_mode_gpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_transformer(deploy_file, mean_file=None):
"""
Returns an instance of caffe.io.Transformer
Arguments:
deploy_file -- path to a .prototxt file
Keyword arguments:
mean_file -- path to a .binaryproto file (optional)
"""
network = caffe_pb2.NetParameter()
with open(deploy_file) as infile:
text_format.Merge(infile.read(), network)
if network.input_shape:
dims = network.input_shape[0].dim
else:
dims = network.input_dim[:4]
t = caffe.io.Transformer(
inputs = {'data': dims}
)
t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)
# color images
if dims[1] == 3:
# channel swap
t.set_channel_swap('data', (2,1,0))
if mean_file:
# set mean pixel
with open(mean_file,'rb') as infile:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(infile.read())
if blob.HasField('shape'):
blob_dims = blob.shape
assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
elif blob.HasField('num') and blob.HasField('channels') and \
blob.HasField('height') and blob.HasField('width'):
blob_dims = (blob.num, blob.channels, blob.height, blob.width)
else:
raise ValueError('blob does not provide shape or 4d dimensions')
pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
t.set_mean('data', pixel)
return t
def load_image(path, height, width, mode='RGB'):
"""
Load an image from disk
Returns an np.ndarray (channels x width x height)
Arguments:
path -- path to an image on disk
width -- resize dimension
height -- resize dimension
Keyword arguments:
mode -- the PIL mode that the image should be converted to
(RGB for color or L for grayscale)
"""
image = PIL.Image.open(path)
image = image.convert(mode)
image = np.array(image)
# squash
image = scipy.misc.imresize(image, (height, width), 'bilinear')
return image
def forward_pass(images, net, transformer, batch_size=None):
"""
Returns scores for each image as an np.ndarray (nImages x nClasses)
Arguments:
images -- a list of np.ndarrays
net -- a caffe.Net
transformer -- a caffe.io.Transformer
Keyword arguments:
batch_size -- how many images can be processed at once
(a high value may result in out-of-memory errors)
"""
if batch_size is None:
batch_size = 1
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:,:,np.newaxis])
else:
caffe_images.append(image)
dims = transformer.inputs['data'][1:]
scores = None
for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
new_shape = (len(chunk),) + tuple(dims)
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
image_data = transformer.preprocess('data', image)
net.blobs['data'].data[index] = image_data
start = time.time()
output = net.forward()[net.outputs[-1]]
end = time.time()
if scores is None:
scores = np.copy(output)
else:
scores = np.vstack((scores, output))
print 'Processed %s/%s images in %f seconds ...' % (len(scores), len(caffe_images), (end - start))
return scores
def read_labels(labels_file):
"""
Returns a list of strings
Arguments:
labels_file -- path to a .txt file
"""
if not labels_file:
print 'WARNING: No labels file provided. Results will be difficult to interpret.'
return None
labels = []
with open(labels_file) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels), 'No labels found'
return labels
def classify(caffemodel, deploy_file, image_files,
mean_file=None, labels_file=None, batch_size=None, use_gpu=True):
"""
Classify some images against a Caffe model and print the results
Arguments:
caffemodel -- path to a .caffemodel
deploy_file -- path to a .prototxt
image_files -- list of paths to images
Keyword arguments:
mean_file -- path to a .binaryproto
    labels_file -- path to a .txt file
use_gpu -- if True, run inference on the GPU
"""
# Load the model and images
net = get_net(caffemodel, deploy_file, use_gpu)
transformer = get_transformer(deploy_file, mean_file)
_, channels, height, width = transformer.inputs['data']
if channels == 3:
mode = 'RGB'
elif channels == 1:
mode = 'L'
else:
raise ValueError('Invalid number for channels: %s' % channels)
images = [load_image(image_file, height, width, mode) for image_file in image_files]
labels = read_labels(labels_file)
# Classify the image
scores = forward_pass(images, net, transformer, batch_size=batch_size)
### Process the results
indices = (-scores).argsort()[:, :5] # take top 5 results
classifications = []
for image_index, index_list in enumerate(indices):
result = []
for i in index_list:
# 'i' is a category in labels and also an index into scores
if labels is None:
label = 'Class #%s' % i
else:
label = labels[i]
result.append((label, round(100.0*scores[image_index, i],4)))
classifications.append(result)
for index, classification in enumerate(classifications):
print '{:-^80}'.format(' Prediction for %s ' % image_files[index])
for label, confidence in classification:
print '{:9.4%} - "{}"'.format(confidence/100.0, label)
print
if __name__ == '__main__':
script_start_time = time.time()
parser = argparse.ArgumentParser(description='Classification example - DIGITS')
### Positional arguments
parser.add_argument('caffemodel', help='Path to a .caffemodel')
parser.add_argument('deploy_file', help='Path to the deploy file')
parser.add_argument('image_file',
nargs='+',
help='Path[s] to an image')
### Optional arguments
parser.add_argument('-m', '--mean',
help='Path to a mean file (*.npy)')
parser.add_argument('-l', '--labels',
help='Path to a labels file')
parser.add_argument('--batch-size',
type=int)
parser.add_argument('--nogpu',
action='store_true',
help="Don't use the GPU")
args = vars(parser.parse_args())
classify(args['caffemodel'], args['deploy_file'], args['image_file'],
args['mean'], args['labels'], args['batch_size'], not args['nogpu'])
print 'Script took %f seconds.' % (time.time() - script_start_time,)
| {
"content_hash": "8f86bfb0ec3dc6e91b42a4452870ac23",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 106,
"avg_line_length": 31.321428571428573,
"alnum_prop": 0.6031927023945268,
"repo_name": "PatrickChrist/CDTM-Deep-Learning-Drones",
"id": "4a8fe15f05ee2513a05ea5092584aa3b4c59b238",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Teams/Arucinator/test/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "338041"
},
{
"name": "HTML",
"bytes": "83637"
},
{
"name": "JavaScript",
"bytes": "69786"
},
{
"name": "Matlab",
"bytes": "6419"
},
{
"name": "Python",
"bytes": "275359"
}
],
"symlink_target": ""
} |
def foobar(whop):
if whop>"pohrw": return "foox"
else: return "barbar"
# the equivalent in expression-form:
foobar = lambda whop: ("foox", "barbar")[whop<="pohrw"]
# in general, we MUST ensure the 'conditional' turns
# into a 0 or 1 -- the 'not' operator is handy for that:
# not needed in the if-else case:
def plok(anything):
if anything: return "xok"
else: return "plik"
# but necessary when moving to expression-form:
plok = lambda anything: ("xok", "plik")[not anything]
# sometimes we need the shortcircuiting operators, 'and'
# and 'or', to avoid evaluating an incorrect expression:
def zod(plud):
if plud: return 16+4/plud
else: return 23
# must use and & or, NOT (...)[], as the operator-idiom:
zod = lambda plud: (plud and (16+4/plud)) or 23
# but if (16+4/plud)==0 [plud == -0.25] this erroneously
# returns 23! A full solution ALSO requires indexing:
zod = lambda plud: ((plud and [16+4/plud]) or [23])[0]
# since the non-empty list [16+4/plud] is always 'true'
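# since Python 2.5 the same intent is written directly as a conditional
# expression, which short-circuits like 'and'/'or' but needs none of the
# indexing tricks above (shown only as an illustrative equivalent):
zod = lambda plud: (16+4/plud) if plud else 23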
| {
"content_hash": "75fa3277a1e33f5911d798f300f4ed23",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 56,
"avg_line_length": 33.5,
"alnum_prop": 0.6746268656716418,
"repo_name": "ActiveState/code",
"id": "16c4fc2e3678e54cdd7efe3fc9b58f92ddf57146",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/52310_conditionals_in_expressions/recipe-52310.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Test file for run_python_file.
This file is executed two ways::
$ coverage run try_execfile.py
and::
$ python try_execfile.py
The output is compared to see that the program execution context is the same
under coverage and under Python.
It is not crucial that the execution be identical, there are some differences
that are OK. This program canonicalizes the output to gloss over those
differences and get a clean diff.
"""
import itertools
import json
import os
import sys
# sys.path varies by execution environments. Coverage.py uses setuptools to
# make console scripts, which means pkg_resources is imported. pkg_resources
# removes duplicate entries from sys.path. So we do that too, since the extra
# entries don't affect the running of the program.
def same_file(p1, p2):
"""Determine if `p1` and `p2` refer to the same existing file."""
if not p1:
return not p2
if not os.path.exists(p1):
return False
if not os.path.exists(p2):
return False
if hasattr(os.path, "samefile"):
return os.path.samefile(p1, p2)
else:
norm1 = os.path.normcase(os.path.normpath(p1))
norm2 = os.path.normcase(os.path.normpath(p2))
return norm1 == norm2
def without_same_files(filenames):
"""Return the list `filenames` with duplicates (by same_file) removed."""
reduced = []
for filename in filenames:
if not any(same_file(filename, other) for other in reduced):
reduced.append(filename)
return reduced
cleaned_sys_path = [os.path.normcase(p) for p in without_same_files(sys.path)]
DATA = "xyzzy"
import __main__
def my_function(a):
"""A function to force execution of module-level values."""
return "my_fn(%r)" % a
FN_VAL = my_function("fooey")
loader = globals().get('__loader__')
spec = globals().get('__spec__')
# A more compact ad-hoc grouped-by-first-letter list of builtins.
CLUMPS = "ABC,DEF,GHI,JKLMN,OPQR,ST,U,VWXYZ_,ab,cd,efg,hij,lmno,pqr,stuvwxyz".split(",")
def word_group(w):
"""Figure out which CLUMP the first letter of w is in."""
for i, clump in enumerate(CLUMPS):
if w[0] in clump:
return i
return 99
builtin_dir = [" ".join(s) for _, s in itertools.groupby(dir(__builtins__), key=word_group)]
globals_to_check = {
'os.getcwd': os.getcwd(),
'__name__': __name__,
'__file__': __file__,
'__doc__': __doc__,
'__builtins__.has_open': hasattr(__builtins__, 'open'),
'__builtins__.dir': builtin_dir,
'__loader__ exists': loader is not None,
'__package__': __package__,
'__spec__ exists': spec is not None,
'DATA': DATA,
'FN_VAL': FN_VAL,
'__main__.DATA': getattr(__main__, "DATA", "nothing"),
'argv0': sys.argv[0],
'argv1-n': sys.argv[1:],
'path': cleaned_sys_path,
}
if loader is not None:
globals_to_check.update({
'__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None)
})
if spec is not None:
globals_to_check.update({
'__spec__.' + aname: getattr(spec, aname)
for aname in ['name', 'origin', 'submodule_search_locations', 'parent', 'has_location']
})
print(json.dumps(globals_to_check, indent=4, sort_keys=True))
| {
"content_hash": "a7f699d868f2ab065960e7acaa3ce7bb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 97,
"avg_line_length": 29.75229357798165,
"alnum_prop": 0.6410730804810361,
"repo_name": "hugovk/coveragepy",
"id": "48f9d098c8ab587b739ee24e6bf3fd2fd78cc730",
"size": "3399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/modules/process_test/try_execfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3467"
},
{
"name": "C",
"bytes": "51630"
},
{
"name": "CSS",
"bytes": "13550"
},
{
"name": "HTML",
"bytes": "155641"
},
{
"name": "JavaScript",
"bytes": "30478"
},
{
"name": "Makefile",
"bytes": "3473"
},
{
"name": "PowerShell",
"bytes": "7288"
},
{
"name": "Python",
"bytes": "993214"
},
{
"name": "Shell",
"bytes": "1281"
}
],
"symlink_target": ""
} |
from flask_wtf import FlaskForm
from wtforms import StringField, validators, PasswordField, TextAreaField
# TODO: RegisterForm
class RegisterForm(FlaskForm):
name = StringField(label='Name', validators=[validators.Length(min=1, max=80)])
username = StringField(label='Username', validators=[validators.Length(min=1, max=80)])
email = StringField(label='Email', validators=[validators.Length(min=1, max=120)])
password = PasswordField(label='Password',
validators=[
validators.DataRequired(),
validators.EqualTo('confirm', 'Passwords do not match')
])
confirm = PasswordField('Confirm Password')
# TODO: VideoForm
class VideoForm(FlaskForm):
title = StringField(label='Title', validators=[validators.Length(min=1, max=200)])
link = StringField(label='Link', validators=[validators.Length(min=1)])
class CommentForm(FlaskForm):
    comment = TextAreaField('Comment', validators=[validators.DataRequired()])
| {
"content_hash": "6f609e792da8763a686fb0f37288c629",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 42.48,
"alnum_prop": 0.660075329566855,
"repo_name": "alaturqua/my_flask_app",
"id": "6038ff7bf83838c8eaa42f64ba6fd5c158838e84",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1364"
},
{
"name": "HTML",
"bytes": "11182"
},
{
"name": "JavaScript",
"bytes": "278"
},
{
"name": "Python",
"bytes": "9553"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutComprehension(Koan):
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual("Lambs", comprehension[0])
self.assertEqual("Orangutans", comprehension[2])
def test_filtering_lists_with_list_comprehensions(self):
feast = ['spam', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy for delicacy in feast if len(delicacy) > 6]
self.assertEqual(5, len(feast))
self.assertEqual(3, len(comprehension))
def test_unpacking_tuples_in_list_comprehensions(self):
list_of_tuples = [(1, 'lumberjack'), (2, 'inquisition'), (4, 'spam')]
comprehension = [ skit * number for number, skit in list_of_tuples ]
self.assertEqual("lumberjack", comprehension[0])
self.assertEqual(16, len(comprehension[2]))
def test_double_list_comprehension(self):
list_of_eggs = ['poached egg', 'fried egg']
list_of_meats = ['lite spam', 'ham spam', 'fried spam']
comprehension = [ '{0} and {1}'.format(egg, meat) for egg in list_of_eggs for meat in list_of_meats]
self.assertEqual(6, len(comprehension))
self.assertEqual("poached egg and lite spam", comprehension[0])
def test_creating_a_set_with_set_comprehension(self):
comprehension = { x for x in 'aabbbcccc'}
        self.assertEqual(set(['a', 'b', 'c']), comprehension) # remember that set members are unique
def test_creating_a_dictionary_with_dictionary_comprehension(self):
dict_of_weapons = {'first': 'fear', 'second': 'surprise',
                           'third':'ruthless efficiency', 'fourth':'fanatical devotion',
'fifth': None}
dict_comprehension = { k.upper(): weapon for k, weapon in dict_of_weapons.iteritems() if weapon}
self.assertEqual(False, 'first' in dict_comprehension)
self.assertEqual(True, 'FIRST' in dict_comprehension)
self.assertEqual(5, len(dict_of_weapons))
self.assertEqual(4, len(dict_comprehension))
| {
"content_hash": "0c1b795c9d84d9e8ac2eb5c2ab41cb91",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 108,
"avg_line_length": 40.410714285714285,
"alnum_prop": 0.6363234644277508,
"repo_name": "gilhooley/python_koans",
"id": "963e6bb6d6a3d223eaa90208720e74991d718abc",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_comprehension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "330757"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
from twisted.internet import defer
from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event
from synapse.util.caches.response_cache import ResponseCache
from synapse.types import ThirdPartyInstanceID
import logging
import urllib
logger = logging.getLogger(__name__)
HOUR_IN_MS = 60 * 60 * 1000
APP_SERVICE_PREFIX = "/_matrix/app/unstable"
def _is_valid_3pe_metadata(info):
if "instances" not in info:
return False
if not isinstance(info["instances"], list):
return False
return True
def _is_valid_3pe_result(r, field):
if not isinstance(r, dict):
return False
for k in (field, "protocol"):
if k not in r:
return False
if not isinstance(r[k], str):
return False
if "fields" not in r:
return False
fields = r["fields"]
if not isinstance(fields, dict):
return False
for k in fields.keys():
if not isinstance(fields[k], str):
return False
return True
class ApplicationServiceApi(SimpleHttpClient):
"""This class manages HS -> AS communications, including querying and
pushing.
"""
def __init__(self, hs):
super(ApplicationServiceApi, self).__init__(hs)
self.clock = hs.get_clock()
self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
@defer.inlineCallbacks
def query_user(self, service, user_id):
if service.url is None:
defer.returnValue(False)
uri = service.url + ("/users/%s" % urllib.quote(user_id))
response = None
try:
response = yield self.get_json(uri, {
"access_token": service.hs_token
})
if response is not None: # just an empty json object
defer.returnValue(True)
except CodeMessageException as e:
if e.code == 404:
defer.returnValue(False)
return
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", uri, ex)
defer.returnValue(False)
@defer.inlineCallbacks
def query_alias(self, service, alias):
if service.url is None:
defer.returnValue(False)
uri = service.url + ("/rooms/%s" % urllib.quote(alias))
response = None
try:
response = yield self.get_json(uri, {
"access_token": service.hs_token
})
if response is not None: # just an empty json object
defer.returnValue(True)
except CodeMessageException as e:
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
defer.returnValue(False)
return
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", uri, ex)
defer.returnValue(False)
@defer.inlineCallbacks
def query_3pe(self, service, kind, protocol, fields):
if kind == ThirdPartyEntityKind.USER:
required_field = "userid"
elif kind == ThirdPartyEntityKind.LOCATION:
required_field = "alias"
else:
raise ValueError(
"Unrecognised 'kind' argument %r to query_3pe()", kind
)
if service.url is None:
defer.returnValue([])
uri = "%s%s/thirdparty/%s/%s" % (
service.url,
APP_SERVICE_PREFIX,
kind,
urllib.quote(protocol)
)
try:
response = yield self.get_json(uri, fields)
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r",
uri, response
)
defer.returnValue([])
ret = []
for r in response:
if _is_valid_3pe_result(r, field=required_field):
ret.append(r)
else:
logger.warning(
"query_3pe to %s returned an invalid result %r",
uri, r
)
defer.returnValue(ret)
except Exception as ex:
logger.warning("query_3pe to %s threw exception %s", uri, ex)
defer.returnValue([])
def get_3pe_protocol(self, service, protocol):
if service.url is None:
defer.returnValue({})
@defer.inlineCallbacks
def _get():
uri = "%s%s/thirdparty/protocol/%s" % (
service.url,
APP_SERVICE_PREFIX,
urllib.quote(protocol)
)
try:
info = yield self.get_json(uri, {})
if not _is_valid_3pe_metadata(info):
logger.warning("query_3pe_protocol to %s did not return a"
" valid result", uri)
defer.returnValue(None)
for instance in info.get("instances", []):
network_id = instance.get("network_id", None)
if network_id is not None:
instance["instance_id"] = ThirdPartyInstanceID(
service.id, network_id,
).to_string()
defer.returnValue(info)
except Exception as ex:
logger.warning("query_3pe_protocol to %s threw exception %s",
uri, ex)
defer.returnValue(None)
key = (service.id, protocol)
return self.protocol_meta_cache.get(key) or (
self.protocol_meta_cache.set(key, _get())
)
@defer.inlineCallbacks
def push_bulk(self, service, events, txn_id=None):
if service.url is None:
defer.returnValue(True)
events = self._serialize(events)
if txn_id is None:
logger.warning("push_bulk: Missing txn ID sending events to %s",
service.url)
txn_id = str(0)
txn_id = str(txn_id)
uri = service.url + ("/transactions/%s" %
urllib.quote(txn_id))
try:
yield self.put_json(
uri=uri,
json_body={
"events": events
},
args={
"access_token": service.hs_token
})
defer.returnValue(True)
return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("push_bulk to %s threw exception %s", uri, ex)
defer.returnValue(False)
def _serialize(self, events):
time_now = self.clock.time_msec()
return [
serialize_event(e, time_now, as_client_event=True) for e in events
]
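# Illustrative shape of a lookup result accepted by _is_valid_3pe_result above
# (all values are made up); guarded so it never runs on import.
if __name__ == "__main__":
    _example_result = {
        "userid": "@_irc_alice:example.org",
        "protocol": "irc",
        "fields": {"nick": "alice", "network": "example"},
    }
    assert _is_valid_3pe_result(_example_result, field="userid")
    print("example result passes validation")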
| {
"content_hash": "5ad4a0031bce8474d5600e9cafdda163",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 78,
"avg_line_length": 32.39366515837104,
"alnum_prop": 0.5335940773851097,
"repo_name": "TribeMedia/synapse",
"id": "6893610e715b9198f4a79ba7a7bb55f3ff6ec1e0",
"size": "7767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/appservice/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
} |
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import authnz.models.permissioned_db_fields
class Migration(migrations.Migration):
replaces = [
("cards", "0001_initial"),
("cards", "0002_remove_waiver_combatant_combatant_waiver"),
("cards", "0003_alter_combatant_waiver"),
("cards", "0004_remove_combatant_waiver_waiver_combatant"),
]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("authnz", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Authorization",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("slug", models.CharField(max_length=255)),
("name", models.CharField(max_length=255)),
("is_primary", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name="Card",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("card_date", models.DateField()),
],
),
migrations.CreateModel(
name="CardReminder",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("reminder_date", models.DateField()),
("is_expiry", models.BooleanField()),
],
),
migrations.CreateModel(
name="Combatant",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
("card_id", models.CharField(max_length=255)),
("last_update", models.DateTimeField(auto_now=True)),
("email", models.CharField(max_length=255, unique=True)),
("sca_name", models.CharField(blank=True, max_length=255, null=True)),
(
"legal_name",
authnz.models.permissioned_db_fields.PermissionedCharField(
max_length=255
),
),
(
"phone",
authnz.models.permissioned_db_fields.PermissionedCharField(
max_length=255
),
),
(
"address1",
authnz.models.permissioned_db_fields.PermissionedCharField(
max_length=255
),
),
(
"address2",
authnz.models.permissioned_db_fields.PermissionedCharField(
blank=True, max_length=255, null=True
),
),
(
"city",
authnz.models.permissioned_db_fields.PermissionedCharField(
max_length=255
),
),
(
"province",
authnz.models.permissioned_db_fields.PermissionedCharField(
default="ON", max_length=2
),
),
(
"postal_code",
authnz.models.permissioned_db_fields.PermissionedCharField(
max_length=7
),
),
(
"dob",
authnz.models.permissioned_db_fields.PermissionedDateField(
blank=True, max_length=255, null=True
),
),
(
"member_number",
authnz.models.permissioned_db_fields.PermissionedIntegerField(
blank=True, null=True
),
),
(
"member_expiry",
authnz.models.permissioned_db_fields.PermissionedDateField(
blank=True, null=True
),
),
],
),
migrations.CreateModel(
name="Discipline",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("slug", models.CharField(max_length=255)),
("name", models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name="Waiver",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("waiver_date", models.DateField()),
(
"combatant",
models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="waiver",
to="cards.combatant",
),
),
],
),
migrations.CreateModel(
name="WaiverReminder",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("reminder_date", models.DateField()),
("is_expiry", models.BooleanField(default=False)),
(
"waiver",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reminders",
to="cards.waiver",
),
),
],
),
migrations.CreateModel(
name="UserGlobalRole",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"role",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="authnz.permission",
),
),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="UserDisciplineRole",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"discipline",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="cards.discipline",
),
),
(
"role",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="authnz.permission",
),
),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Marshal",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("slug", models.CharField(max_length=255)),
("name", models.CharField(max_length=255)),
(
"discipline",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="marshals",
to="cards.discipline",
),
),
],
),
migrations.CreateModel(
name="CombatantWarrant",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
(
"card",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="cards.card"
),
),
(
"marshal",
models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
to="cards.marshal",
),
),
],
options={
"db_table": "cards_combatant_warrant",
},
),
migrations.CreateModel(
name="CombatantAuthorization",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
(
"authorization",
models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
to="cards.authorization",
),
),
(
"card",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="cards.card"
),
),
],
options={
"db_table": "cards_combatant_authorization",
},
),
migrations.AddIndex(
model_name="combatant",
index=models.Index(fields=["uuid"], name="cards_comba_uuid_e4cbe6_idx"),
),
migrations.AddIndex(
model_name="combatant",
index=models.Index(
fields=["card_id"], name="cards_comba_card_id_b38110_idx"
),
),
migrations.AddField(
model_name="cardreminder",
name="card",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to="cards.card"
),
),
migrations.AddField(
model_name="card",
name="authorizations",
field=models.ManyToManyField(
through="cards.CombatantAuthorization", to="cards.authorization"
),
),
migrations.AddField(
model_name="card",
name="combatant",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="cards",
to="cards.combatant",
),
),
migrations.AddField(
model_name="card",
name="discipline",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="cards.discipline"
),
),
migrations.AddField(
model_name="card",
name="warrants",
field=models.ManyToManyField(
through="cards.CombatantWarrant", to="cards.marshal"
),
),
migrations.AddField(
model_name="authorization",
name="discipline",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="authorizations",
to="cards.discipline",
),
),
migrations.AddIndex(
model_name="combatantwarrant",
index=models.Index(fields=["uuid"], name="cards_comba_uuid_df78d5_idx"),
),
migrations.AddIndex(
model_name="combatantauthorization",
index=models.Index(fields=["uuid"], name="cards_comba_uuid_732cb3_idx"),
),
migrations.AddConstraint(
model_name="card",
constraint=models.UniqueConstraint(
fields=("combatant", "discipline"), name="combatant_card"
),
),
migrations.RemoveField(
model_name="waiver",
name="combatant",
),
migrations.AddField(
model_name="waiver",
name="combatant",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="waiver",
to="cards.combatant",
),
),
]
| {
"content_hash": "d662781458652dc8dfa475de2caa924e",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 86,
"avg_line_length": 33.66150442477876,
"alnum_prop": 0.38534341110745973,
"repo_name": "lrt512/emol",
"id": "a1bde4641b9848c31a39530e6488f1de4f74f281",
"size": "15264",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emol/cards/migrations/0001_squashed_0004_remove_combatant_waiver_waiver_combatant.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5404"
},
{
"name": "HTML",
"bytes": "36437"
},
{
"name": "JavaScript",
"bytes": "31682"
},
{
"name": "Less",
"bytes": "5352"
},
{
"name": "Python",
"bytes": "153090"
}
],
"symlink_target": ""
} |
"""
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format
"""
from distutils.version import LooseVersion
import os
from docutils import nodes
from docutils.parsers.rst import directives, Directive, roles
from docutils.parsers.rst.directives import images
from docutils.parsers.rst.roles import set_classes
import sphinx
try:
from sphinx.builders.html import JSONHTMLBuilder
except ImportError:
from sphinxcontrib.serializinghtml import JSONHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.locale import _
from sphinx.writers.html import HTMLTranslator
def true_false(argument):
return directives.choice(argument, ('true', 'false'))
def static_dynamic(argument):
return directives.choice(argument, ('static', 'dynamic'))
class TitlesCache(object):
titles = {}
@staticmethod
def _document_key(document):
return hash(document)
@classmethod
def set_title(cls, document, title):
cls.titles[cls._document_key(document)] = title
@classmethod
def get_title(cls, document):
return cls.titles.get(cls._document_key(document), None)
@classmethod
def has_title(cls, document):
return cls._document_key(document) in cls.titles
class JSONConfluenceBuilder(JSONHTMLBuilder):
"""For backward compatibility"""
name = 'json_conf'
def __init__(self, app):
super(JSONConfluenceBuilder, self).__init__(app)
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
self.translator_class = HTMLConfluenceTranslator
self.warn('json_conf builder is deprecated and will be removed in future releases')
class HTMLConfluenceTranslator(HTMLTranslator):
def unimplemented_visit(self, node):
self.builder.warn('Unimplemented visit is not implemented for node: {}'.format(node))
def unknown_visit(self, node):
self.builder.warn('Unknown visit is not implemented for node: {}'.format(node))
def visit_admonition(self, node, name=''):
"""
Info, Tip, Note, and Warning Macros
https://confluence.atlassian.com/conf58/info-tip-note-and-warning-macros-771892344.html
<ac:structured-macro ac:name="info">
<ac:parameter ac:name="icon">false</ac:parameter>
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:rich-text-body>
<p>
This is important information.
</p>
</ac:rich-text-body>
</ac:structured-macro>
"""
confluence_admonition_map = {
'note': 'info',
'warning': 'note',
'attention': 'note',
'hint': 'tip',
'tip': 'tip',
'important': 'warning',
'error': 'warning',
'danger': 'warning',
}
admonition_type = confluence_admonition_map.get(name, 'info')
macro = """\
<ac:structured-macro ac:name="{admonition_type}">
<ac:parameter ac:name="icon">true</ac:parameter>
<ac:parameter ac:name="title"></ac:parameter>
<ac:rich-text-body>
"""
self.body.append(macro.format(admonition_type=admonition_type))
def depart_admonition(self, node=None):
macro = """
</ac:rich-text-body>
</ac:structured-macro>\n
"""
self.body.append(macro)
def imgtag(self, filename, suffix='\n', **attributes):
"""
Attached image
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Images
<ac:image>
<ri:attachment ri:filename="atlassian_logo.gif" />
</ac:image>
Supported image attributes (some of these attributes mirror the equivalent HTML 4 IMG element):
Name Description
---- -----------
ac:align image alignment
ac:border Set to "true" to set a border
ac:class css class attribute.
ac:title image tool tip.
ac:style css style
ac:thumbnail Set to "true" to designate this image as a thumbnail.
ac:alt alt text
ac:height image height
ac:width image width
"""
prefix = []
atts = {}
for (name, value) in attributes.items():
atts[name.lower()] = value
attlist = atts.items()
attlist = sorted(attlist)
parts = []
src_part = '<ri:attachment ri:filename="%s" />' % filename
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
value = u' '.join(map(unicode, value))
else:
# First assume Python 2
try:
value = unicode(value)
# Otherwise, do it the Python 3 way
except NameError:
value = str(value)
parts.append('ac:%s="%s"' % (name.lower(), self.attval(value)))
infix = '</ac:image>'
return ''.join(prefix) + '<ac:image %s>%s%s' % (' '.join(parts), src_part, infix) + suffix
def visit_image(self, node):
atts = {}
uri = node['uri']
filename = os.path.basename(uri)
atts['alt'] = node.get('alt', uri)
atts['thumbnail'] = 'true'
if 'width' in node:
atts['width'] = node['width']
if 'name' in node:
atts['title'] = node['name']
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
self.context.append('')
self.body.append(self.imgtag(filename, suffix, **atts))
def visit_title(self, node):
if isinstance(node.parent, nodes.section) and not TitlesCache.has_title(self.document):
h_level = self.section_level + self.initial_header_level - 1
if h_level == 1:
                # Confluence takes the first title as the page title from rst
                # It is used for making internal links
TitlesCache.set_title(self.document, node.children[0])
# ignore first header; document must have title header
raise nodes.SkipNode
HTMLTranslator.visit_title(self, node)
def visit_target(self, node):
"""
Anchor Macro
https://confluence.atlassian.com/display/DOC/Anchor+Macro
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">here</ac:parameter>
</ac:structured-macro>
"""
# Anchor confluence macros
anchor_macros = """
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">%s</ac:parameter>
</ac:structured-macro>
"""
if 'refid' in node or 'refname' in node:
if 'refuri' in node:
link = node['refuri']
elif 'refid' in node:
link = node['refid']
else:
link = node['refname']
self.body.append(anchor_macros % link)
def depart_target(self, node):
pass
def visit_literal_block(self, node):
"""
Code Block Macro
https://confluence.atlassian.com/display/DOC/Code+Block+Macro
<ac:structured-macro ac:name="code">
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:parameter ac:name="theme">FadeToGrey</ac:parameter>
<ac:parameter ac:name="linenumbers">true</ac:parameter>
<ac:parameter ac:name="language">xml</ac:parameter>
<ac:parameter ac:name="firstline">0001</ac:parameter>
<ac:parameter ac:name="collapse">true</ac:parameter>
<ac:plain-text-body><![CDATA[<b>This is my code</b>]]></ac:plain-text-body>
</ac:structured-macro>
"""
parts = ['<ac:structured-macro ac:name="code">']
if 'language' in node:
# Collapsible argument
if node['language'] == 'collapse':
parts.append('<ac:parameter ac:name="collapse">true</ac:parameter>')
valid = ['actionscript3', 'bash', 'csharp', 'coldfusion', 'cpp', 'css', 'delphi', 'diff', 'erlang',
'groovy', 'html/xml', 'java', 'javafx', 'javascript', 'none', 'perl', 'php', 'powershell',
'python', 'ruby', 'scala', 'sql', 'vb']
if node['language'] not in valid:
node['language'] = 'none'
parts.append('<ac:parameter ac:name="language">%s</ac:parameter>' % node['language'])
if 'linenos' in node and node['linenos']:
parts.append('<ac:parameter ac:name="linenumbers">true</ac:parameter>')
if 'caption' in node and node['caption']:
parts.append('<ac:parameter ac:name="title">%s</ac:parameter>' % node['caption'])
parts.append('<ac:plain-text-body><![CDATA[%s]]></ac:plain-text-body>' % node.rawsource)
parts.append('</ac:structured-macro>')
self.body.append(''.join(parts))
raise nodes.SkipNode
def visit_download_reference(self, node):
"""
Link to an attachment
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Links
<ac:link>
<ri:attachment ri:filename="atlassian_logo.gif" />
<ac:plain-text-link-body><![CDATA[Link to a Confluence Attachment]]></ac:plain-text-link-body>
</ac:link>
"""
if 'filename' not in node:
self.context.append('')
return
text = None
if len(node.children) > 0 and len(node.children[0].children) > 0:
text = node.children[0].children[0]
parts = [
'<ac:link>',
'<ri:attachment ri:filename="%s" />' % node['filename'],
'<ac:plain-text-link-body>',
'<![CDATA[%s]]>' % text if text else '',
'</ac:plain-text-link-body>',
'</ac:link>',
]
self.body.append(''.join(parts))
raise nodes.SkipNode
def visit_section(self, node):
# removed section open tag
self.section_level += 1
def depart_section(self, node):
# removed section close tag
self.section_level -= 1
def visit_reference(self, node):
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refuri']
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
else:
assert 'refid' in node, 'References must have "refuri" or "refid" attribute.'
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber'])))
def visit_desc(self, node):
""" Replace <dl> """
self.body.append(self.starttag(node, 'div', style="margin-top: 10px"))
def depart_desc(self, node):
self.body.append('</div>\n\n')
def visit_desc_signature(self, node):
""" Replace <dt> """
# the id is set automatically
self.body.append(self.starttag(
node, 'div', style='margin-left: 20px; font-weight: bold;'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
""" Copy-paste from original method """
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</div>')
def visit_desc_content(self, node):
""" Replace <dd> """
self.body.append(self.starttag(
node, 'div', '', style='margin-left: 40px;'))
def depart_desc_content(self, node):
self.body.append('</div>')
def visit_table(self, node):
""" Fix ugly table border
"""
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="0"))
def write_colspecs(self):
""" Fix ugly column width
"""
pass
class ImageConf(images.Image):
"""
Image confluence directive
"""
def run(self):
# remove 'align' processing
# remove 'target' processing
self.options.pop('align', None)
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
return [image_node]
class TocTree(Directive):
"""
Replace sphinx "toctree" directive to confluence macro
Table of Contents Macro
https://confluence.atlassian.com/display/DOC/Table+of+Contents+Macro
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'maxdepth': int,
'name': directives.unchanged,
'caption': directives.unchanged_required,
'glob': directives.flag,
'hidden': directives.flag,
'includehidden': directives.flag,
'titlesonly': directives.flag,
}
def run(self):
macro = """
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>\n
"""
attributes = {'format': 'html'}
raw_node = nodes.raw('', macro, **attributes)
return [raw_node]
class JiraIssuesDirective(Directive):
"""
JIRA Issues Macro
https://confluence.atlassian.com/doc/jira-issues-macro-139380.html
<ac:structured-macro ac:name="jira" ac:schema-version="1" ac:macro-id="da6b6413-0b93-4052-af90-dbb252175860">
<ac:parameter ac:name="server">Atlassian JIRA (JAC)</ac:parameter>
<ac:parameter ac:name="columns">key,summary,created</ac:parameter>
<ac:parameter ac:name="maximumIssues">20</ac:parameter>
<ac:parameter ac:name="jqlQuery">project = CONF AND FixVersion=5.8 </ac:parameter>
<ac:parameter ac:name="serverId">146780e9-1234-312f-1243-ed0555666fa</ac:parameter>
</ac:structured-macro>
"""
required_arguments = 1
has_content = False
final_argument_whitespace = True
option_spec = {
"anonymous": true_false,
"server_id": directives.unchanged,
"baseurl": directives.unchanged,
"columns": directives.unchanged,
"count": true_false,
"height": directives.positive_int,
"title": directives.unchanged,
"render_mode": static_dynamic,
"url": directives.unchanged,
"width": directives.unchanged,
"maximum_issues": directives.positive_int
}
def run(self):
result = ['<ac:structured-macro ac:name="jira" ac:schema-version="1">']
param_macro = '<ac:parameter ac:name="{name}">{value}</ac:parameter>'
for name, value in self.options.items():
result.append(param_macro.format(name=underscore_to_camelcase(name), value=value))
jql_query = self.arguments[0]
result.append(param_macro.format(name='jqlQuery', value=jql_query))
result.append('</ac:structured-macro>')
attributes = {'format': 'html'}
raw_node = nodes.raw('', '\n'.join(result), **attributes)
return [raw_node]
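# A minimal sketch of how the directive above might be used from
# reStructuredText (the JQL query, column list and issue limit are made-up
# values, not taken from the project's documentation):
#
#   .. jira_issues:: project = CONF AND fixVersion = 5.8
#      :columns: key,summary,created
#      :maximum_issues: 20
#
# The single required argument becomes the jqlQuery parameter, and each
# option name is converted to camelCase by underscore_to_camelcase() below.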
class JiraIssueRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:structured-macro ac:name="jira" ac:schema-version="1">
<ac:parameter ac:name="key">{key}</ac:parameter>
<ac:parameter ac:name="showSummary">false</ac:parameter>
</ac:structured-macro>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(key=text), **attributes)], []
class JiraUserRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:link>
<ri:user ri:username="{username}"/>
</ac:link>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(username=text), **attributes)], []
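# Illustrative usage of the two roles above once they are registered in
# setup() as 'jira_issue' and 'jira_user' (the issue key and username are
# invented examples):
#
#   See :jira_issue:`CONF-1234`, reported by :jira_user:`jdoe`.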
class CaptionedCodeBlock(CodeBlock):
def run(self):
ret = super(CaptionedCodeBlock, self).run()
caption = self.options.get('caption')
if caption and isinstance(ret[0], nodes.container):
container_node = ret[0]
if isinstance(container_node[0], nodes.caption):
container_node[1]['caption'] = caption
return [container_node[1]]
return ret
def underscore_to_camelcase(text):
return ''.join(word.title() if i else word for i, word in enumerate(text.split('_')))
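# For example (illustrative): underscore_to_camelcase('maximum_issues')
# returns 'maximumIssues', matching the parameter names Confluence expects.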
def get_path():
from os import path
package_dir = path.abspath(path.dirname(__file__))
template_path = path.join(package_dir, 'themes')
return template_path
def setup(app):
"""
:type app: sphinx.application.Sphinx
"""
app.config.html_theme_path = [get_path()]
app.config.html_theme = 'confluence'
app.config.html_scaled_image_link = False
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
app.set_translator("html", HTMLConfluenceTranslator)
app.set_translator("json", HTMLConfluenceTranslator)
else:
app.config.html_translator_class = 'sphinx_confluence.HTMLConfluenceTranslator'
app.config.html_add_permalinks = ''
jira_issue = JiraIssueRole('jira_issue', nodes.Inline)
app.add_role(jira_issue.name, jira_issue)
jira_user = JiraUserRole('jira_user', nodes.Inline)
app.add_role(jira_user.name, jira_user)
app.add_directive('image', ImageConf)
app.add_directive('toctree', TocTree)
app.add_directive('jira_issues', JiraIssuesDirective)
app.add_directive('code-block', CaptionedCodeBlock)
app.add_builder(JSONConfluenceBuilder)
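# A minimal sketch of enabling this extension from a Sphinx project's
# conf.py (assuming the package is importable as 'sphinx_confluence'; the
# rest of the project layout is an assumption):
#
#   # conf.py
#   extensions = ['sphinx_confluence']
#
# setup() above then sets the 'confluence' theme, registers the roles and
# directives, and swaps in the Confluence-aware HTML translator.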
| {
"content_hash": "e7ab933246e0d7a073688a074b3e140f",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 113,
"avg_line_length": 34.197278911564624,
"alnum_prop": 0.5830515217823752,
"repo_name": "Arello-Mobile/sphinx-confluence",
"id": "c7f8f3432d34a1b560dcfcd20f59bbc6a1e1a219",
"size": "20132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx_confluence/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "408"
},
{
"name": "Python",
"bytes": "20995"
}
],
"symlink_target": ""
} |
import os
import pytest
import subprocess
import sys
import tempfile
TOP_SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def test_bootstrap_in_source_dir(env):
"""
Running the bootstrap script from the source directory should fail.
"""
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call([
sys.executable, "bootstrap.py",
"--board", "cubietruck", "--toolchain", "local"
], cwd=TOP_SRC_DIR)
def test_no_toolchain_for_board(env):
"""
    Running the bootstrap script from the source directory should fail.
"""
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call([
sys.executable, "bootstrap.py",
"--board", "cubietruck", "--toolchain", "local"
], cwd=TOP_SRC_DIR)
@pytest.mark.parametrize("variant", [None, "xen", "board"])
@pytest.mark.parametrize("toolchain", ["armv7-eabihf"])
def test_quick_cubietruck_bootstrap(env, variant, toolchain):
"""
This test runs a bootstrap for the different cubietruck variants.
It uses the available toolchains. Nothing is downloaded.
"""
cmd = [
sys.executable,
os.path.join(TOP_SRC_DIR, "bootstrap.py"),
"--board", "cubietruck",
"--toolchain", toolchain,
"--no-download",
]
if variant is not None:
cmd.extend(['--board-variant', variant])
subprocess.check_call(cmd, cwd=env.build_dir)
@pytest.mark.parametrize("source", ["linux-4.12.0"])
@pytest.mark.parametrize("config", [
"linux-4.12-sunxi", "linux-4.12-sunxi-xen-dom0", "linux-4.12-xen-domu"
])
@pytest.mark.parametrize("toolchain", ["armv7-eabihf"])
def test_bootstrap_kernel_only(env, source, config, toolchain):
build_dir = tempfile.TemporaryDirectory()
subprocess.check_call([
sys.executable,
os.path.join(TOP_SRC_DIR, "bootstrap.py"),
"--kernel", source, config,
"--toolchain", toolchain,
"--no-download",
], cwd=build_dir.name)
@pytest.mark.parametrize("source", ["2017.07"])
@pytest.mark.parametrize("config", ["2017.07-minimal"])
@pytest.mark.parametrize("toolchain", ["armv7-eabihf"])
def test_bootstrap_uboot_only(env, source, config, toolchain):
build_dir = tempfile.TemporaryDirectory()
subprocess.check_call([
sys.executable,
os.path.join(TOP_SRC_DIR, "bootstrap.py"),
"--uboot", source, config,
"--toolchain", toolchain,
"--no-download",
], cwd=build_dir.name)
@pytest.mark.parametrize("source", ["4.8.2"])
@pytest.mark.parametrize("config", ["4.8-sunxi"])
@pytest.mark.parametrize("toolchain", ["armv7-eabihf"])
def test_bootstrap_xen_only(env, source, config, toolchain):
build_dir = tempfile.TemporaryDirectory()
subprocess.check_call([
sys.executable,
os.path.join(TOP_SRC_DIR, "bootstrap.py"),
"--xen", source, config,
"--toolchain", toolchain,
"--no-download",
], cwd=build_dir.name)
| {
"content_hash": "e168caa4d905f2e7613ae1d70a3d9e7c",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 74,
"avg_line_length": 33.87640449438202,
"alnum_prop": 0.6398009950248756,
"repo_name": "sbxg/sbxg",
"id": "cadf169dec497cf513017829733cee550d42dec4",
"size": "4110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_boostrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59297"
},
{
"name": "Shell",
"bytes": "4354"
}
],
"symlink_target": ""
} |
"""
==========
Overview
==========
Propositions
============
First, let's create some propositions using the :class:`PropLiteral`
constructor. All propositions are atomic, that is, either positive or
negative literals.
>>> kill = PropLiteral('kill')
>>> kill.polarity
True
>>> intent = PropLiteral('intent')
>>> murder = PropLiteral('murder')
>>> witness1 = PropLiteral('witness1')
>>> unreliable1 = PropLiteral('unreliable1')
>>> witness2 = PropLiteral('witness2')
>>> unreliable2 = PropLiteral('unreliable2')
The :meth:`negate` method allows us to introduce negated propositions.
>>> neg_intent = intent.negate()
>>> print(neg_intent)
-intent
>>> neg_intent.polarity
False
>>> neg_intent == intent
False
>>> neg_intent.negate() == intent
True
Arguments
=========
Arguments are built with the :class:`Argument` constructor. They are required
to have a conclusion, and may also have premises and exceptions.
>>> arg1 = Argument(murder, premises={kill, intent})
>>> arg2 = Argument(intent, premises={witness1}, exceptions={unreliable1})
>>> arg3 = Argument(neg_intent, premises={witness2}, exceptions={unreliable2})
>>> print(arg1)
[intent, kill], ~[] => murder
In order to organise the dependencies between the conclusion of an argument
and its premises and exceptions, we model them using a directed graph called
an :class:`ArgumentSet`. Notice that the premise of one argument (e.g., the
``intent`` premise of ``arg1``) can be the conclusion of another argument (i.e.,
``arg2``).
>>> argset = ArgumentSet()
>>> argset.add_argument(arg1, arg_id='arg1')
>>> argset.add_argument(arg2, arg_id='arg2')
>>> argset.add_argument(arg3, arg_id='arg3')
There is a :func:`draw` method which allows us to view the resulting graph.
>>> argset.draw() # doctest: +SKIP
Proof Standards
===============
In evaluating the relative value of arguments for a particular conclusion
``p``, we need to determine what standard of *proof* is required to establish
``p``. The notion of proof used here is not formal proof in a logical
system. Instead, it tries to capture how substantial the arguments are
in favour of, or against, a particular conclusion.
The :class:`ProofStandard` constructor is initialised with a list of
``(proposition, name-of-proof-standard)`` pairs. The default proof standard,
viz., ``'scintilla'``, is the weakest level. Different
propositions can be assigned different proof standards that they need
to attain.
>>> ps = ProofStandard([(intent, "beyond_reasonable_doubt")],
... default='scintilla')
Carneades Argument Evaluation Structure
=======================================
The core of the argumentation model is a data structure plus set of
rules for evaluating arguments; this is called a Carneades Argument
Evaluation Structure (CAES). A CAES consists of a set of arguments,
an audience (or jury), and a method for determining whether propositions
satisfy the relevant proof standards.
The role of the audience is modeled as an :class:`Audience`, consisting
of a set of assumed propositions, and an assignment of weights to
arguments.
>>> assumptions = {kill, witness1, witness2, unreliable2}
>>> weights = {'arg1': 0.8, 'arg2': 0.3, 'arg3': 0.8}
>>> audience = Audience(assumptions, weights)
Once an audience has been defined, we can use it to initialise a
:class:`CAES`, together with instances of :class:`ArgumentSet` and
:class:`ProofStandard`:
>>> caes = CAES(argset, audience, ps)
>>> caes.get_all_arguments()
[intent, kill], ~[] => murder
[witness1], ~[unreliable1] => intent
[witness2], ~[unreliable2] => -intent
The :meth:`get_arguments` method returns the list of arguments in an
:class:`ArgumentSet` which support a given proposition.
A proposition is said to be *acceptable* in a CAES if it meets its required
proof standard. The process of checking whether a proposition meets its proof
standard requires another notion: namely, whether the arguments that support
it are *applicable*. An argument ``arg`` is applicable if and only if all its
premises either belong to the audience's assumptions or are acceptable;
moreover, the exceptions of ``arg`` must not belong to the assumptions or be
acceptable. For example, `arg2`, which supports the conclusion `intent`, is
applicable since its premise `witness1` is an assumption, while the exception
`unreliable1` is neither an assumption nor acceptable.
>>> arg_for_intent = argset.get_arguments(intent)[0]
>>> print(arg_for_intent)
[witness1], ~[unreliable1] => intent
>>> caes.applicable(arg_for_intent)
True
>>> caes.acceptable(intent)
False
Although there is an argument (``arg3``) for `-intent`, it is not applicable,
since the exception `unreliable2` does belong to the audience's assumptions.
>>> any(caes.applicable(arg) for arg in argset.get_arguments(neg_intent))
False
This in turn has the consequence that `-intent` is not acceptable.
>>> caes.acceptable(neg_intent)
False
Despite the fact that the argument `arg2` for `intent` is applicable,
the conclusion `murder` is not acceptable, since `intent` fails its
'beyond_reasonable_doubt' proof standard and so `arg1` is not applicable.
>>> caes.acceptable(murder)
False
>>> caes.acceptable(murder.negate())
False
"""
from collections import namedtuple, defaultdict
import logging
import os
import sys
from igraph import Graph, plot
# fix to ensure that package is loaded properly on system path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from carneades.tracecalls import TraceCalls
LOGLEVEL = logging.DEBUG
# Uncomment the following line to raise the logging level and thereby turn off
# debug messages
# LOGLEVEL = logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)
class PropLiteral(object):
"""
Proposition literals have most of the properties of ordinary strings,
except that the negation method is Boolean; i.e.
>>> a = PropLiteral('a')
>>> a.negate().negate() == a
True
"""
def __init__(self, string, polarity=True):
"""
Propositions are either positive or negative atoms.
"""
self.polarity = polarity
self._string = string
def negate(self):
"""
Negation of a proposition.
We create a copy of the current proposition and flip its polarity.
"""
polarity = (not self.polarity)
return PropLiteral(self._string, polarity=polarity)
def __str__(self):
"""
Override ``__str__()`` so that negation is realised as a prefix on the
string.
"""
if self.polarity:
return self._string
return "-" + self._string
def __hash__(self):
return self._string.__hash__()
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__str__() == other.__str__()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.__str__() < other.__str__()
class Argument(object):
"""
An argument consists of a conclusion, a set of premises and a set of
exceptions (both of which can be empty).
Although arguments should have identifiers (`arg_id`), it is preferable
to specify these when calling the :meth:`add_argument` method of
:class:`ArgumentSet`.
"""
def __init__(self, conclusion, premises=set(), exceptions=set()):
"""
:param conclusion: The conclusion of the argument.
:type conclusion: :class:`PropLiteral`
:param premises: The premises of the argument.
:type premises: set(:class:`PropLiteral`)
:param exceptions: The exceptions of the argument
:type exceptions: set(:class:`PropLiteral`)
"""
self.conclusion = conclusion
self.premises = premises
self.exceptions = exceptions
self.arg_id = None
def __str__(self):
"""
Define print string for arguments.
We follow similar conventions to those used by the CarneadesDSL
Haskell implementation.
Premises and exceptions are sorted to facilitate doctest comparison.
"""
if len(self.premises) == 0:
prems = "[]"
else:
prems = sorted(self.premises)
if len(self.exceptions) == 0:
excepts = "[]"
else:
excepts = sorted(self.exceptions)
return "{}, ~{} => {}".format(prems, excepts, self.conclusion)
class ArgumentSet(object):
"""
An ``ArgumentSet`` is modeled as a dependency graph where vertices represent
the components of an argument. A vertex corresponding to the conclusion
of an argument *A* will **depend on** the premises and exceptions in *A*.
The graph is built using the `igraph <http://igraph.org/>`_ library. This
allows *attributes* to be associated with both vertices and edges.
Attributes are represented as Python dictionaries where the key (which
must be a string) is the name of the attribute and the value is the
attribute itself. For more details, see the
`igraph tutorial\
<http://igraph.org/python/doc/tutorial/tutorial.html#setting-and-retrieving-attributes>`_.
"""
def __init__(self):
self.graph = Graph()
self.graph.to_directed()
self.arg_count = 1
self.arguments = []
def propset(self):
"""
The set of :class:`PropLiteral`\ s represented by the vertices in
the graph.
Retrieving this set relies on the fact that :meth:`add_proposition`
sets a value for the ``prop`` attribute in vertices created when a
new proposition is added to the graph.
"""
g = self.graph
props = set()
try:
props = {p for p in g.vs['prop']}
except KeyError:
pass
return props
def add_proposition(self, proposition):
"""
Add a proposition to a graph if it is not already present as a vertex.
:param proposition: The proposition to be added to the graph.
:type proposition: :class:`PropLiteral`
:return: The graph vertex corresponding to the proposition.
:rtype: :class:`Graph.Vertex`
:raises TypeError: if the input is not a :class:`PropLiteral`.
"""
if isinstance(proposition, PropLiteral):
if proposition in self.propset():
logging.debug("Proposition '{}' is already in graph".\
format(proposition))
else:
# add the proposition as a vertex attribute, recovered via the
# key 'prop'
self.graph.add_vertex(prop=proposition)
logging.debug("Added proposition '{}' to graph".\
format(proposition))
return self.graph.vs.select(prop=proposition)[0]
else:
raise TypeError('Input {} should be PropLiteral'.\
format(proposition))
def add_argument(self, argument, arg_id=None):
"""
Add an argument to the graph.
:parameter argument: The argument to be added to the graph.
:type argument: :class:`Argument`
:parameter arg_id: The ID of the argument
:type arg_id: str or None
"""
g = self.graph
if arg_id is not None:
argument.arg_id = arg_id
else:
argument.arg_id = 'arg{}'.format(self.arg_count)
self.arg_count += 1
self.arguments.append(argument)
# add the arg_id as a vertex attribute, recovered via the 'arg' key
self.graph.add_vertex(arg=argument.arg_id)
arg_v = g.vs.select(arg=argument.arg_id)[0]
# add proposition vertices to the graph
conclusion_v = self.add_proposition(argument.conclusion)
self.add_proposition(argument.conclusion.negate())
premise_vs =\
[self.add_proposition(prop) for prop in sorted(argument.premises)]
exception_vs =\
[self.add_proposition(prop) for prop in sorted(argument.exceptions)]
target_vs = premise_vs + exception_vs
# add new edges to the graph
edge_to_arg = [(conclusion_v.index, arg_v.index)]
edges_from_arg = [(arg_v.index, target.index) for target in target_vs]
g.add_edges(edge_to_arg + edges_from_arg)
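    # Illustrative example of the resulting structure (names taken from the
    # module docstring): adding Argument(murder, premises={kill, intent}) as
    # 'arg1' creates the edge murder -> arg1 plus edges arg1 -> kill and
    # arg1 -> intent, so the conclusion vertex depends on the argument
    # vertex, which in turn depends on its premise and exception vertices.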
def get_arguments(self, proposition):
"""
Find the arguments for a proposition in an *ArgumentSet*.
:param proposition: The proposition to be checked.
:type proposition: :class:`PropLiteral`
:return: A list of the arguments pro the proposition
:rtype: list(:class:`Argument`)
:raises ValueError: if the input :class:`PropLiteral` isn't present\
in the graph.
"""
g = self.graph
# index of vertex associated with the proposition
vs = g.vs.select(prop=proposition)
try:
conc_v_index = vs[0].index
# IDs of vertices reachable in one hop from the proposition's vertex
target_IDs = [e.target for e in g.es.select(_source=conc_v_index)]
# the vertices indexed by target_IDs
out_vs = [g.vs[i] for i in target_IDs]
arg_IDs = [v['arg'] for v in out_vs]
args = [arg for arg in self.arguments if arg.arg_id in arg_IDs]
return args
except IndexError:
raise ValueError("Proposition '{}' is not in the current graph".\
format(proposition))
def draw(self, debug=False):
"""
Visualise an :class:`ArgumentSet` as a labeled graph.
:parameter debug: If :class:`True`, add the vertex index to the label.
"""
g = self.graph
# labels for nodes that are classed as propositions
labels = g.vs['prop']
# insert the labels for nodes that are classed as arguments
for i in range(len(labels)):
if g.vs['arg'][i] is not None:
labels[i] = g.vs['arg'][i]
if debug:
d_labels = []
for (i, label) in enumerate(labels):
d_labels.append("{}\nv{}".format(label, g.vs[i].index))
labels = d_labels
g.vs['label'] = labels
roots = [i for i in range(len(g.vs)) if g.indegree()[i] == 0]
ALL = 3 # from igraph
layout = g.layout_reingold_tilford(mode=ALL, root=roots)
plot_style = {}
plot_style['vertex_color'] = \
['lightblue' if x is None else 'pink' for x in g.vs['arg']]
plot_style['vertex_size'] = 60
plot_style['vertex_shape'] = \
['circle' if x is None else 'rect' for x in g.vs['arg']]
plot_style['margin'] = 40
plot_style['layout'] = layout
plot(g, **plot_style)
def write_to_graphviz(self, fname=None):
g = self.graph
result = "digraph G{ \n"
for vertex in g.vs:
arg_label = vertex.attributes()['arg']
prop_label = vertex.attributes()['prop']
if arg_label:
dot_str = (arg_label +
' [color="black", fillcolor="pink", width=.75, '
'shape=box, style="filled"]; \n')
elif prop_label:
dot_str = ('"{}"'.format(prop_label) +
' [color="black", fillcolor="lightblue", '
'fixedsize=true, width=1 shape="circle", '
'style="filled"]; \n')
result += dot_str
for edge in g.es:
source_label = g.vs[edge.source]['prop'] if\
g.vs[edge.source]['prop'] else g.vs[edge.source]['arg']
target_label = g.vs[edge.target]['prop'] if\
g.vs[edge.target]['prop'] else g.vs[edge.target]['arg']
result += '"{}" -> "{}"'.format(source_label, target_label)
dot_str = " ; \n"
result += dot_str
result += "}"
if fname is None:
fname = 'graph.dot'
with open(fname, 'w') as f:
print(result, file=f)
class ProofStandard(object):
"""
Each proposition in a CAES is associated with a proof standard.
A proof standard is initialised by supplying a (possibly empty) list of
pairs, each consisting of a proposition and the name of a proof standard.
>>> intent = PropLiteral('intent')
>>> ps = ProofStandard([(intent, "beyond_reasonable_doubt")])
Possible values for proof standards: `"scintilla"`, `"preponderance"`,
`"clear_and_convincing"`, `"beyond_reasonable_doubt"`, and
`"dialectical_validity"`.
"""
def __init__(self, propstandards, default='scintilla'):
"""
:param propstandards: the proof standard associated with\
each proposition under consideration.
:type propstandards: list(tuple(:class:`PropLiteral`, str))
"""
self.proof_standards = ["scintilla", "preponderance",
"clear_and_convincing",
"beyond_reasonable_doubt",
"dialectical_validity"]
self.default = default
self.config = defaultdict(lambda: self.default)
self._set_standard(propstandards)
def _set_standard(self, propstandards):
for (prop, standard) in propstandards:
if standard not in self.proof_standards:
raise ValueError("{} is not a valid proof standard".\
format(standard))
self.config[prop] = standard
def get_proofstandard(self, proposition):
"""
Determine the proof standard associated with a proposition.
:param proposition: The proposition to be checked.
:type proposition: :class:`PropLiteral`
"""
return self.config[proposition]
Audience = namedtuple('Audience', ['assumptions', 'weight'])
"""
An audience has assumptions about which premises hold and also
assigns weights to arguments.
:param assumptions: The assumptions held by the audience
:type assumptions: set(:class:`PropLiteral`)
:param weights: A mapping from :class:`Argument`\ s to weights.
:type weights: dict
"""
class CAES(object):
"""
A class that represents a Carneades Argument Evaluation Structure (CAES).
"""
def __init__(self, argset, audience, proofstandard, alpha=0.4, beta=0.3,
gamma=0.2):
"""
:parameter argset: the argument set used in the CAES
:type argset: :class:`ArgSet`
:parameter audience: the audience for the CAES
:type audience: :class:`Audience`
:parameter proofstandard: the proof standards used in the CAES
:type proofstandard: :class:`ProofStandard`
:parameter alpha: threshold of strength of argument required for a\
proposition to reach the proof standards "clear and convincing" and\
"beyond reasonable doubt".
:type alpha: float in interval [0, 1]
:parameter beta: difference required between strength of\
argument *pro* a proposition vs strength of argument *con*\
to reach the proof standard "clear and convincing".
:type beta: float in interval [0, 1]
:parameter gamma: threshold of strength of a *con* argument required\
for a proposition to reach the proof standard "beyond reasonable\
doubt".
:type gamma: float in interval [0, 1]
"""
self.argset = argset
self.assumptions = audience.assumptions
self.weight = audience.weight
self.standard = proofstandard
self.alpha = alpha
self.beta = beta
self.gamma = gamma
def get_all_arguments(self):
"""
Show all arguments in the :class:`ArgSet` of the CAES.
"""
for arg in self.argset.arguments:
print(arg)
@TraceCalls()
def applicable(self, argument):
"""
An argument is *applicable* in a CAES if it needs to be taken into
account when evaluating the CAES.
:parameter argument: The argument whose applicablility is being\
determined.
:type argument: :class:`Argument`
:rtype: bool
"""
_acceptable = lambda p: self.acceptable(p)
return self._applicable(argument, _acceptable)
def _applicable(self, argument, _acceptable):
"""
:parameter argument: The argument whose applicablility is being
determined.
:type argument: :class:`Argument`
:parameter _acceptable: The function which determines the
acceptability of a proposition in the CAES.
:type _acceptable: LambdaType
:rtype: bool
"""
logging.debug('Checking applicability of {}...'.format(argument.arg_id))
logging.debug('Current assumptions: {}'.format(self.assumptions))
logging.debug('Current premises: {}'.format(argument.premises))
b1 = all(p in self.assumptions or \
(p.negate() not in self.assumptions and \
_acceptable(p)) for p in argument.premises)
if argument.exceptions:
logging.debug('Current exception: {}'.format(argument.exceptions))
b2 = all(e not in self.assumptions and \
(e.negate() in self.assumptions or \
not _acceptable(e)) for e in argument.exceptions)
return b1 and b2
@TraceCalls()
def acceptable(self, proposition):
"""
A conclusion is *acceptable* in a CAES if it can be arrived at under
the relevant proof standards, given the beliefs of the audience.
:param proposition: The conclusion whose acceptability is to be\
determined.
:type proposition: :class:`PropLiteral`
:rtype: bool
"""
standard = self.standard.get_proofstandard(proposition)
logging.debug("Checking whether proposition '{}'"
"meets proof standard '{}'.".\
format(proposition, standard))
return self.meets_proof_standard(proposition, standard)
@TraceCalls()
def meets_proof_standard(self, proposition, standard):
"""
Determine whether a proposition meets a given proof standard.
:param proposition: The proposition which should meet the relevant\
proof standard.
:type proposition: :class:`PropLiteral`
:parameter standard: a specific level of proof;\
see :class:`ProofStandard` for admissible values
:type standard: str
:rtype: bool
"""
arguments = self.argset.get_arguments(proposition)
result = False
if standard == 'scintilla':
result = any(arg for arg in arguments if self.applicable(arg))
elif standard == 'preponderance':
result = self.max_weight_pro(proposition) > \
self.max_weight_con(proposition)
elif standard == 'clear_and_convincing':
mwp = self.max_weight_pro(proposition)
mwc = self.max_weight_con(proposition)
exceeds_alpha = mwp > self.alpha
diff_exceeds_gamma = (mwp - mwc) > self.gamma
logging.debug("max weight pro '{}' is {}".format(proposition, mwp))
logging.debug("max weight con '{}' is {}".format(proposition, mwc))
logging.debug("max weight pro '{}' > alpha '{}': {}".\
format(mwp, self.alpha, exceeds_alpha))
logging.debug("diff between pro and con = {} > gamma: {}".\
format(mwp-mwc, diff_exceeds_gamma))
result = (mwp > self.alpha) and (mwp - mwc > self.gamma)
elif standard == 'beyond_reasonable_doubt':
result = self.meets_proof_standard(proposition,
'clear_and_convincing') \
and \
self.max_weight_con(proposition) < self.gamma
return result
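    # Worked example (illustrative, using the weights from the module
    # docstring and the default thresholds alpha=0.4, gamma=0.2): for
    # 'intent' the only applicable pro argument is arg2 with weight 0.3 and
    # no con argument is applicable, so mwp = 0.3 and mwc = 0.0.  The
    # 'clear_and_convincing' standard fails because mwp = 0.3 is not greater
    # than alpha = 0.4, hence 'beyond_reasonable_doubt' fails as well; this
    # is why caes.acceptable(intent) is False in the doctest.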
def weight_of(self, argument):
"""
Retrieve the weight associated by the CAES audience with an argument.
:parameter argument: The argument whose weight is being determined.
:type argument: :class:`Argument`
:return: The weight of the argument.
:rtype: float in interval [0, 1]
"""
arg_id = argument.arg_id
try:
return self.weight[arg_id]
except KeyError:
raise ValueError("No weight assigned to argument '{}'.".\
format(arg_id))
def max_weight_applicable(self, arguments):
"""
Retrieve the weight of the strongest applicable argument in a list
of arguments.
:parameter arguments: The arguments whose weight is being compared.
:type arguments: list(:class:`Argument`)
:return: The maximum of the weights of the arguments.
:rtype: float in interval [0, 1]
"""
arg_ids = [arg.arg_id for arg in arguments]
applicable_args = [arg for arg in arguments if self.applicable(arg)]
if len(applicable_args) == 0:
logging.debug('No applicable arguments in {}'.format(arg_ids))
return 0.0
applic_arg_ids = [arg.arg_id for arg in applicable_args]
logging.debug('Checking applicability and weights of {}'.\
format(applic_arg_ids))
weights = [self.weight_of(argument) for argument in applicable_args]
logging.debug('Weights of {} are {}'.format(applic_arg_ids, weights))
return max(weights)
def max_weight_pro(self, proposition):
"""
The maximum of the weights pro the proposition.
:param proposition: The conclusion whose acceptability is to be\
determined.
:type proposition: :class:`PropLiteral`
:rtype: float in interval [0, 1]
"""
args = self.argset.get_arguments(proposition)
return self.max_weight_applicable(args)
def max_weight_con(self, proposition):
"""
The maximum of the weights con the proposition.
:param proposition: The conclusion whose acceptability is to be\
determined.
:type proposition: :class:`PropLiteral`
:rtype: float in interval [0, 1]
"""
con = proposition.negate()
args = self.argset.get_arguments(con)
return self.max_weight_applicable(args)
def arg_demo():
"""
Demo of how to initialise and call methods of a CAES.
"""
kill = PropLiteral('kill')
intent = PropLiteral('intent')
neg_intent = intent.negate()
murder = PropLiteral('murder')
witness1 = PropLiteral('witness1')
unreliable1 = PropLiteral('unreliable1')
witness2 = PropLiteral('witness2')
unreliable2 = PropLiteral('unreliable2')
ps = ProofStandard([(intent, "beyond_reasonable_doubt")])
arg1 = Argument(murder, premises={kill, intent})
arg2 = Argument(intent, premises={witness1}, exceptions={unreliable1})
arg3 = Argument(neg_intent, premises={witness2}, exceptions={unreliable2})
argset = ArgumentSet()
argset.add_argument(arg1)
argset.add_argument(arg2)
argset.add_argument(arg3)
argset.draw()
argset.write_to_graphviz()
assumptions = {kill, witness1, witness2, unreliable2}
weights = {'arg1': 0.8, 'arg2': 0.3, 'arg3': 0.8}
audience = Audience(assumptions, weights)
caes = CAES(argset, audience, ps)
caes.acceptable(murder)
caes.acceptable(murder.negate())
DOCTEST = False
if __name__ == '__main__':
if DOCTEST:
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
else:
arg_demo()
| {
"content_hash": "38cd226ad91709056a5ab54d616b8a60",
"timestamp": "",
"source": "github",
"line_count": 827,
"max_line_length": 94,
"avg_line_length": 33.582829504232166,
"alnum_prop": 0.6163180066971519,
"repo_name": "ewan-klein/carneades",
"id": "98c02b37c921e40aebc6062aa8e4346136f1e4a9",
"size": "27993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/carneades/caes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32882"
}
],
"symlink_target": ""
} |
import morepath
from webtest import TestApp as Client
def test_function_extends():
class App(morepath.App):
@morepath.dispatch_method('obj')
def foo(self, obj):
return "default"
class Extending(App):
pass
class Alpha(object):
pass
@App.method(App.foo, obj=Alpha)
def app_foo(app, obj):
return "App"
@Extending.method(App.foo, obj=Alpha)
def extending_foo(app, obj):
return "Extending"
assert App().foo(Alpha()) == 'App'
assert Extending().foo(Alpha()) == 'Extending'
def test_extends():
class App(morepath.App):
pass
class Extending(App):
pass
@App.path(path='users/{username}')
class User(object):
def __init__(self, username):
self.username = username
@App.view(model=User)
def render_user(self, request):
return "User: %s" % self.username
@Extending.view(model=User, name='edit')
def edit_user(self, request):
return "Edit user: %s" % self.username
cl = Client(App())
response = cl.get('/users/foo')
assert response.body == b'User: foo'
response = cl.get('/users/foo/edit', status=404)
cl = Client(Extending())
response = cl.get('/users/foo')
assert response.body == b'User: foo'
response = cl.get('/users/foo/edit')
assert response.body == b'Edit user: foo'
def test_overrides_view():
class App(morepath.App):
pass
class Overriding(App):
pass
@App.path(path='users/{username}')
class User(object):
def __init__(self, username):
self.username = username
@App.view(model=User)
def render_user(self, request):
return "User: %s" % self.username
@Overriding.view(model=User)
def render_user2(self, request):
return "USER: %s" % self.username
cl = Client(App())
response = cl.get('/users/foo')
assert response.body == b'User: foo'
cl = Client(Overriding())
response = cl.get('/users/foo')
assert response.body == b'USER: foo'
def test_overrides_model():
class App(morepath.App):
pass
class Overriding(App):
pass
@App.path(path='users/{username}')
class User(object):
def __init__(self, username):
self.username = username
@App.view(model=User)
def render_user(self, request):
return "User: %s" % self.username
@Overriding.path(model=User, path='users/{username}')
def get_user(username):
if username != 'bar':
return None
return User(username)
cl = Client(App())
response = cl.get('/users/foo')
assert response.body == b'User: foo'
response = cl.get('/users/bar')
assert response.body == b'User: bar'
cl = Client(Overriding())
response = cl.get('/users/foo', status=404)
response = cl.get('/users/bar')
assert response.body == b'User: bar'
| {
"content_hash": "6443bcc83ddcf575a7a6b197dd99512e",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 57,
"avg_line_length": 24.239669421487605,
"alnum_prop": 0.5925673371974088,
"repo_name": "faassen/morepath",
"id": "1fb3a713a31e2628bfd020ddeeac8a4912995c06",
"size": "2933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "morepath/tests/test_extend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "458289"
}
],
"symlink_target": ""
} |
"""
byceps.services.ticketing.models.archived_attendance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from ....database import db
from ....typing import PartyID, UserID
from ....util.instances import ReprBuilder
class ArchivedAttendance(db.Model):
"""A user's attendance of a party.
This is a link between a party and a user that attended it.
While such a link is usually established through a ticket for a
party that is assigned to a user, this entity was introduced for
legacy data for which no information on tickets, orders, seating
areas and so on exists anymore (or should not be migrated).
The data for this entity is expected to be inserted from the
outside. BYCEPS itself currently does not write any archived
attendances (but incorporates them to be displayed on user
profiles).
"""
__tablename__ = 'user_archived_party_attendances'
user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True)
party_id = db.Column(db.UnicodeText, db.ForeignKey('parties.id'), primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
def __init__(self, user_id: UserID, party_id: PartyID) -> None:
self.user_id = user_id
self.party_id = party_id
def __repr__(self) -> str:
return ReprBuilder(self) \
.add('user_id', str(self.user_id)) \
.add('party_id', self.party_id) \
.build()
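# Archived attendances are written from outside BYCEPS (see the class
# docstring).  A minimal sketch of such an import, assuming the usual
# Flask-SQLAlchemy session exposed as db.session (the IDs are placeholders):
#
#   attendance = ArchivedAttendance(user_id, party_id)
#   db.session.add(attendance)
#   db.session.commit()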
| {
"content_hash": "e2fc8194be6993d507fb1e92b147be1e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 87,
"avg_line_length": 34.73913043478261,
"alnum_prop": 0.6645807259073843,
"repo_name": "m-ober/byceps",
"id": "61255f3d2a36b3b63d1132f315b302efda621623",
"size": "1598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/services/ticketing/models/archived_attendance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
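# For example (illustrative only, not part of the default template), a local
# sqlite3 setup could be configured as:
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'dev.db',
#     }
# }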
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '01234567890123456789012345678901234567890123456789'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
)
| {
"content_hash": "0077e117097b9ba2f53633c390bb9284",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 115,
"avg_line_length": 35.358695652173914,
"alnum_prop": 0.6873655087611436,
"repo_name": "fmierlo/django-default-settings",
"id": "3ceab4dfc1dae213b82efa3b49755078a639c0ef",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "release/1.2/project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "191433"
},
{
"name": "Shell",
"bytes": "6233"
}
],
"symlink_target": ""
} |
import test.support, unittest
import os
import shutil
import sys
import subprocess
import tempfile
from test import script_helper
from test.script_helper import (spawn_python, kill_python, assert_python_ok,
assert_python_failure)
# XXX (ncoghlan): Move to script_helper and make consistent with run_python
def _kill_python_and_exit_code(p):
data = kill_python(p)
returncode = p.wait()
return data, returncode
class CmdLineTest(unittest.TestCase):
def test_directories(self):
assert_python_failure('.')
assert_python_failure('< .')
def verify_valid_flag(self, cmd_line):
rc, out, err = assert_python_ok(*cmd_line)
self.assertTrue(out == b'' or out.endswith(b'\n'))
self.assertNotIn(b'Traceback', out)
self.assertNotIn(b'Traceback', err)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
rc, out, err = assert_python_ok('-h')
self.assertIn(b'usage', out)
def test_version(self):
version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
for switch in '-V', '--version':
rc, out, err = assert_python_ok(switch)
self.assertFalse(err.startswith(version))
self.assertTrue(out.startswith(version))
def test_verbose(self):
# -v causes imports to write to stderr. If the write to
# stderr itself causes an import to happen (for the output
# codec), a recursion loop can occur.
rc, out, err = assert_python_ok('-v')
self.assertNotIn(b'stack overflow', err)
rc, out, err = assert_python_ok('-vv')
self.assertNotIn(b'stack overflow', err)
def test_xoptions(self):
def get_xoptions(*args):
# use subprocess module directly because test.script_helper adds
# "-X faulthandler" to the command line
args = (sys.executable, '-E') + args
args += ('-c', 'import sys; print(sys._xoptions)')
out = subprocess.check_output(args)
opts = eval(out.splitlines()[0])
return opts
opts = get_xoptions()
self.assertEqual(opts, {})
opts = get_xoptions('-Xa', '-Xb=c,d=e')
self.assertEqual(opts, {'a': True, 'b': 'c,d=e'})
def test_showrefcount(self):
def run_python(*args):
# this is similar to assert_python_ok but doesn't strip
# the refcount from stderr. It can be replaced once
# assert_python_ok stops doing that.
cmd = [sys.executable]
cmd.extend(args)
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
p.stdout.close()
p.stderr.close()
rc = p.returncode
self.assertEqual(rc, 0)
return rc, out, err
code = 'import sys; print(sys._xoptions)'
# normally the refcount is hidden
rc, out, err = run_python('-c', code)
self.assertEqual(out.rstrip(), b'{}')
self.assertEqual(err, b'')
# "-X showrefcount" shows the refcount, but only in debug builds
rc, out, err = run_python('-X', 'showrefcount', '-c', code)
self.assertEqual(out.rstrip(), b"{'showrefcount': True}")
if hasattr(sys, 'gettotalrefcount'): # debug build
self.assertRegex(err, br'^\[\d+ refs, \d+ blocks\]')
else:
self.assertEqual(err, b'')
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
assert_python_failure('-m')
# Check we get an error for a nonexistent module
assert_python_failure('-m', 'fnord43520xyz')
# Check the runpy module also gives an error for
# a nonexistent module
assert_python_failure('-m', 'runpy', 'fnord43520xyz')
# All good if module is located and run successfully
assert_python_ok('-m', 'timeit', '-n', '1')
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write(b'Timer\n')
p.stdin.write(b'exit()\n')
data = kill_python(p)
self.assertTrue(data.find(b'1 loop') != -1)
self.assertTrue(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
assert_python_failure('-c')
# Check we get an error for an uncaught exception
assert_python_failure('-c', 'raise Exception')
# All good if execution is successful
assert_python_ok('-c', 'pass')
@unittest.skipUnless(test.support.FS_NONASCII, 'need support.FS_NONASCII')
def test_non_ascii(self):
# Test handling of non-ascii data
command = ("assert(ord(%r) == %s)"
% (test.support.FS_NONASCII, ord(test.support.FS_NONASCII)))
assert_python_ok('-c', command)
# On Windows, pass bytes to subprocess doesn't test how Python decodes the
# command line, but how subprocess does decode bytes to unicode. Python
# doesn't decode the command line because Windows provides directly the
# arguments as unicode (using wmain() instead of main()).
@unittest.skipIf(sys.platform == 'win32',
'Windows has a native unicode API')
def test_undecodable_code(self):
undecodable = b"\xff"
env = os.environ.copy()
# Use C locale to get ascii for the locale encoding
env['LC_ALL'] = 'C'
code = (
b'import locale; '
b'print(ascii("' + undecodable + b'"), '
b'locale.getpreferredencoding())')
p = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env)
stdout, stderr = p.communicate()
if p.returncode == 1:
# _Py_char2wchar() decoded b'\xff' as '\udcff' (b'\xff' is not
# decodable from ASCII) and run_command() failed on
# PyUnicode_AsUTF8String(). This is the expected behaviour on
# Linux.
pattern = b"Unable to decode the command from the command line:"
elif p.returncode == 0:
# _Py_char2wchar() decoded b'\xff' as '\xff' even if the locale is
# C and the locale encoding is ASCII. It occurs on FreeBSD, Solaris
# and Mac OS X.
pattern = b"'\\xff' "
# The output is followed by the encoding name, an alias to ASCII.
# Examples: "US-ASCII" or "646" (ISO 646, on Solaris).
else:
raise AssertionError("Unknown exit code: %s, output=%a" % (p.returncode, stdout))
if not stdout.startswith(pattern):
raise AssertionError("%a doesn't start with %a" % (stdout, pattern))
@unittest.skipUnless(sys.platform == 'darwin', 'test specific to Mac OS X')
def test_osx_utf8(self):
def check_output(text):
decoded = text.decode('utf-8', 'surrogateescape')
expected = ascii(decoded).encode('ascii') + b'\n'
env = os.environ.copy()
# C locale gives ASCII locale encoding, but Python uses UTF-8
# to parse the command line arguments on Mac OS X
env['LC_ALL'] = 'C'
p = subprocess.Popen(
(sys.executable, "-c", "import sys; print(ascii(sys.argv[1]))", text),
stdout=subprocess.PIPE,
env=env)
stdout, stderr = p.communicate()
self.assertEqual(stdout, expected)
self.assertEqual(p.returncode, 0)
# test valid utf-8
text = 'e:\xe9, euro:\u20ac, non-bmp:\U0010ffff'.encode('utf-8')
check_output(text)
# test invalid utf-8
text = (
b'\xff' # invalid byte
b'\xc3\xa9' # valid utf-8 character
b'\xc3\xff' # invalid byte sequence
b'\xed\xa0\x80' # lone surrogate character (invalid)
)
check_output(text)
def test_unbuffered_output(self):
# Test expected operation of the '-u' switch
for stream in ('stdout', 'stderr'):
# Binary is unbuffered
code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data, b'x', "binary %s not unbuffered" % stream)
# Text is line-buffered
code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data.strip(), b'x',
"text %s not line-buffered" % stream)
def test_unbuffered_input(self):
# sys.stdin still works with '-u'
code = ("import sys; sys.stdout.write(sys.stdin.read(1))")
p = spawn_python('-u', '-c', code)
p.stdin.write(b'x')
p.stdin.flush()
data, rc = _kill_python_and_exit_code(p)
self.assertEqual(rc, 0)
self.assertTrue(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
path1 = "ABCDE" * 100
path2 = "FGHIJ" * 100
path = path1 + os.pathsep + path2
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc, out, err = assert_python_ok('-S', '-c', code,
PYTHONPATH=path)
self.assertIn(path1.encode('ascii'), out)
self.assertIn(path2.encode('ascii'), out)
def test_empty_PYTHONPATH_issue16309(self):
# On Posix, it is documented that setting PATH to the
# empty string is equivalent to not setting PATH at all,
# which is an exception to the rule that in a string like
# "/bin::/usr/bin" the empty string in the middle gets
# interpreted as '.'
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc1, out1, err1 = assert_python_ok('-c', code, PYTHONPATH="")
rc2, out2, err2 = assert_python_ok('-c', code, __isolated=False)
# regarding to Posix specification, outputs should be equal
# for empty and unset PYTHONPATH
self.assertEqual(out1, out2)
def test_displayhook_unencodable(self):
for encoding in ('ascii', 'latin-1', 'utf-8'):
# We are testing a PYTHON environment variable here, so we can't
# use -E, -I, or script_helper (which uses them). So instead we do
# poor-man's isolation by deleting the PYTHON vars from env.
env = {key:value for (key,value) in os.environ.copy().items()
if not key.startswith('PYTHON')}
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(
[sys.executable, '-i'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
# non-ascii, surrogate, non-BMP printable, non-BMP unprintable
text = "a=\xe9 b=\uDC80 c=\U00010000 d=\U0010FFFF"
p.stdin.write(ascii(text).encode('ascii') + b"\n")
p.stdin.write(b'exit()\n')
data = kill_python(p)
escaped = repr(text).encode(encoding, 'backslashreplace')
self.assertIn(escaped, data)
def check_input(self, code, expected):
with tempfile.NamedTemporaryFile("wb+") as stdin:
sep = os.linesep.encode('ASCII')
stdin.write(sep.join((b'abc', b'def')))
stdin.flush()
stdin.seek(0)
with subprocess.Popen(
(sys.executable, "-c", code),
stdin=stdin, stdout=subprocess.PIPE) as proc:
stdout, stderr = proc.communicate()
self.assertEqual(stdout.rstrip(), expected)
def test_stdin_readline(self):
# Issue #11272: check that sys.stdin.readline() replaces '\r\n' by '\n'
# on Windows (sys.stdin is opened in binary mode)
self.check_input(
"import sys; print(repr(sys.stdin.readline()))",
b"'abc\\n'")
def test_builtin_input(self):
# Issue #11272: check that input() strips newlines ('\n' or '\r\n')
self.check_input(
"print(repr(input()))",
b"'abc'")
def test_output_newline(self):
# Issue 13119 Newline for print() should be \r\n on Windows.
code = """if 1:
import sys
print(1)
print(2)
print(3, file=sys.stderr)
print(4, file=sys.stderr)"""
rc, out, err = assert_python_ok('-c', code)
if sys.platform == 'win32':
self.assertEqual(b'1\r\n2\r\n', out)
self.assertEqual(b'3\r\n4', err)
else:
self.assertEqual(b'1\n2\n', out)
self.assertEqual(b'3\n4', err)
    def test_unmatched_quote(self):
# Issue #10206: python program starting with unmatched quote
# spewed spaces to stdout
rc, out, err = assert_python_failure('-c', "'")
self.assertRegex(err.decode('ascii', 'ignore'), 'SyntaxError')
self.assertEqual(b'', out)
def test_stdout_flush_at_shutdown(self):
# Issue #5319: if stdout.flush() fails at shutdown, an error should
# be printed out.
code = """if 1:
import os, sys
sys.stdout.write('x')
os.close(sys.stdout.fileno())"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', out)
self.assertRegex(err.decode('ascii', 'ignore'),
'Exception ignored in.*\nOSError: .*')
def test_closed_stdout(self):
# Issue #13444: if stdout has been explicitly closed, we should
# not attempt to flush it at shutdown.
code = "import sys; sys.stdout.close()"
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', err)
# Issue #7111: Python should work without standard streams
@unittest.skipIf(os.name != 'posix', "test needs POSIX semantics")
def _test_no_stdio(self, streams):
code = """if 1:
import os, sys
for i, s in enumerate({streams}):
if getattr(sys, s) is not None:
os._exit(i + 1)
os._exit(42)""".format(streams=streams)
def preexec():
if 'stdin' in streams:
os.close(0)
if 'stdout' in streams:
os.close(1)
if 'stderr' in streams:
os.close(2)
p = subprocess.Popen(
[sys.executable, "-E", "-c", code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec)
out, err = p.communicate()
self.assertEqual(test.support.strip_python_stderr(err), b'')
self.assertEqual(p.returncode, 42)
def test_no_stdin(self):
self._test_no_stdio(['stdin'])
def test_no_stdout(self):
self._test_no_stdio(['stdout'])
def test_no_stderr(self):
self._test_no_stdio(['stderr'])
def test_no_std_streams(self):
self._test_no_stdio(['stdin', 'stdout', 'stderr'])
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
hashes.append(out)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print("random is", sys.flags.hash_randomization)'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertIn(b'random is 1', out)
def test_del___main__(self):
# Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
# borrowed reference to the dict of __main__ module and later modify
# the dict whereas the module was destroyed
filename = test.support.TESTFN
self.addCleanup(test.support.unlink, filename)
with open(filename, "w") as script:
print("import sys", file=script)
print("del sys.modules['__main__']", file=script)
assert_python_ok(filename)
def test_unknown_options(self):
rc, out, err = assert_python_failure('-E', '-z')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
# Add "without='-E'" to prevent _assert_python to append -E
# to env_vars and change the output of stderr
rc, out, err = assert_python_failure('-z', without='-E')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
rc, out, err = assert_python_failure('-a', '-z', without='-E')
self.assertIn(b'Unknown option: -a', err)
# only the first unknown option is reported
self.assertNotIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
self.assertEqual(b'', out)
@unittest.skipIf(script_helper._interpreter_requires_environment(),
'Cannot run -I tests when PYTHON env vars are required.')
def test_isolatedmode(self):
self.verify_valid_flag('-I')
self.verify_valid_flag('-IEs')
rc, out, err = assert_python_ok('-I', '-c',
'from sys import flags as f; '
'print(f.no_user_site, f.ignore_environment, f.isolated)',
            # dummyvar to prevent extraneous -E
dummyvar="")
self.assertEqual(out.strip(), b'1 1 1')
with test.support.temp_cwd() as tmpdir:
fake = os.path.join(tmpdir, "uuid.py")
main = os.path.join(tmpdir, "main.py")
with open(fake, "w") as f:
f.write("raise RuntimeError('isolated mode test')\n")
with open(main, "w") as f:
f.write("import uuid\n")
f.write("print('ok')\n")
self.assertRaises(subprocess.CalledProcessError,
subprocess.check_output,
[sys.executable, main], cwd=tmpdir,
stderr=subprocess.DEVNULL)
out = subprocess.check_output([sys.executable, "-I", main],
cwd=tmpdir)
self.assertEqual(out.strip(), b"ok")
def test_main():
test.support.run_unittest(CmdLineTest)
test.support.reap_children()
if __name__ == "__main__":
test_main()
| {
"content_hash": "de4c9f631a7936d975c3edee70b071d5",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 93,
"avg_line_length": 41.15578947368421,
"alnum_prop": 0.5624840145275973,
"repo_name": "chidea/GoPythonDLLWrapper",
"id": "cb9bbddc8c62c4d3ddb8aefbb32df421dae2fe81",
"size": "19747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/lib/test/test_cmd_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1345"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Go",
"bytes": "2169"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "152703"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "23244205"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "3770"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import json, gzip, sys
for l in sys.stdin:
print json.dumps(eval(l))
| {
"content_hash": "19b07009b0ab269ba696aef0ef4fe579",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 27,
"avg_line_length": 18,
"alnum_prop": 0.6944444444444444,
"repo_name": "etrain/datascripts",
"id": "0a4891454303eb51f0475f7b3d83decc3a0bc731",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amazon/parse_loose_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72"
},
{
"name": "Shell",
"bytes": "692"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
from wombat import readadc
import colorsys
# pin assignments
adc_chan = 2 # MCP9700 temp sensor on analog input CH2
red = 23 # red LED on GPIO 23 (active high)
green = 22 # green LED on GPIO 22 (active high)
blue = 27 # blue LED on GPIO 27 (active high)
# constants
min_temp = 20 # min and max temps (deg C) for display range
max_temp = 40
# configure I/O
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# configure pins
GPIO.setup(red, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
GPIO.setup(blue, GPIO.OUT)
# setup PWM outputs
pr = GPIO.PWM(red, 100) # use 100 Hz for all
pg = GPIO.PWM(green, 100)
pb = GPIO.PWM(blue, 100)
pr.start(0) # all initially off
pg.start(0)
pb.start(0)
try:
while True:
# convert ADC input level (0 - 1023) to temperature (-50 - 280 deg C)
temp = (readadc(adc_chan)*3.3/1023 - 0.5) * 100
# map temperature to hue
# min_temp corresponds to hue = 0,
# max_temp corresponds to hue = 1
if temp < min_temp:
hue = 0
elif temp > max_temp:
hue = 1
else:
hue = (temp - min_temp) / (max_temp - min_temp)
# convert hue to RGB values (0-1)
rgb = colorsys.hsv_to_rgb(hue, 1, 1) # colours are fully saturated, max value
# adjust PWM outputs
pr.ChangeDutyCycle(rgb[0]*100) # red
pg.ChangeDutyCycle(rgb[1]*100) # green
pb.ChangeDutyCycle(rgb[2]*100) # blue
except KeyboardInterrupt:
pr.stop()
pg.stop()
pb.stop()
GPIO.cleanup()
| {
"content_hash": "c6a9f2312c6146ff579c651b0d35f4a6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 28.220338983050848,
"alnum_prop": 0.5741741741741742,
"repo_name": "gooligumelec/wombat-projects",
"id": "28eb82f60b61782d5d1f4651b951cfbdd52f6b01",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temp_rgb-led.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13851"
}
],
"symlink_target": ""
} |
import shutil
from framesource import VideoFrameSource
import tempfile
import decimal
import rateconverter
import os
"""
extractframes package - extract frames from a video.
The top level module contains only one function, extract(), which is the main
way you'd accomplish frame extraction.
"""
# maybe refactor this part so that the file movement is also testable
# separately from the extraction?
def extract(infile, outfile, ratio=None, in_frames=None, quiet=True,
out_count=None, out_offset=0):
outdir = os.path.dirname(outfile)
if len(outdir) == 0:
outdir = '.'
if not os.path.isdir(outdir):
raise IOError('Destination directory %s does not exist!' % os.path.dirname(outfile))
if ratio is not None and out_count is not None:
raise ValueError('You can only specify one of ratio and out_count')
elif ratio is None and out_count is None:
ratio = 1
frame_source = VideoFrameSource(infile, quiet=quiet)
if not in_frames:
# xrange is half-open interval. For closed interval, would be
# [0, get_num_frames() - 1]
in_frames = xrange(0, frame_source.get_num_frames())
if in_frames[0] < 0 or in_frames[-1] > frame_source.get_num_frames() - 1:
raise ValueError("Requested bounds %s don't fit in %d-frame video file"
% (in_frames, frame_source.get_num_frames()))
in_count = in_frames[-1] - in_frames[0] + 1
if out_count is not None:
ratio = rateconverter.ratio_for_number(in_count, out_count)
iterator = rateconverter.convert_integers_by_iterator_ratio(ratio, in_frames,
dest_offset=out_offset)
if not quiet:
try:
import progressbar
pbar = progressbar.ProgressBar(widgets=['Copying frames to destination',
progressbar.Bar(), progressbar.ETA()])
test = pbar([1])
iterator = pbar(list(iterator))
except (ImportError, TypeError):
print "(For a progress bar, install python-progressbar v. 2.3)"
for src, dst in iterator:
source = frame_source.get_frame_file(src)
dest = outfile % dst
shutil.copy(source, dest)
| {
"content_hash": "427a70a1a06d0ac4f698af34cd64e7db",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 92,
"avg_line_length": 33.738461538461536,
"alnum_prop": 0.6511627906976745,
"repo_name": "robmoggach/python-moshion",
"id": "dea904ed941f08ba82f3f4aac712c7ac1febe2eb",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moshion/extractframes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161903"
},
{
"name": "Shell",
"bytes": "159080"
}
],
"symlink_target": ""
} |
"""Fake image input pipeline. Returns the same batch of ones over and over."""
import copy
from init2winit.dataset_lib import data_utils
import jax
import jax.numpy as jnp
from ml_collections.config_dict import config_dict
import numpy as np
TRAIN_IMAGES = 1281167
EVAL_IMAGES = 50000
NUM_CLASSES = 1000
IMAGE_SIZE = 224
DEFAULT_HPARAMS = config_dict.ConfigDict(dict(
input_shape=(224, 224, 3),
output_shape=(NUM_CLASSES,),
train_size=TRAIN_IMAGES,
valid_size=EVAL_IMAGES))
METADATA = {
'apply_one_hot_in_loss': False,
}
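# A minimal usage sketch (illustrative values; a single host is assumed, and the
# field name `train_iterator_fn` on data_utils.Dataset is an assumption here):
#
#   hps = copy.deepcopy(DEFAULT_HPARAMS)
#   dataset = get_fake(shuffle_rng=None, batch_size=8, eval_batch_size=8, hps=hps)
#   batch = next(dataset.train_iterator_fn())
#   # batch['inputs'] is an (8, 224, 224, 3) array of ones and batch['targets']
#   # is a one-hot encoding of class 0 for every example.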
def get_fake_batch(hps):
"""Generate batches of images of all ones and one-hot labels."""
batch_size = hps.batch_size
input_shape = hps.input_shape
num_classes = hps.output_shape[0]
train_input_shape = (batch_size, *input_shape)
images = jnp.ones(train_input_shape, dtype=jnp.float32)
labels = jax.nn.one_hot(
np.zeros((batch_size,)), num_classes, dtype=jnp.int32)
batch = {
'inputs': images,
'targets': labels,
'weights': jnp.ones(batch_size, dtype=images.dtype),
}
return batch
def get_fake(shuffle_rng, batch_size, eval_batch_size, hps=None):
"""Data generators for imagenet."""
del shuffle_rng
per_host_batch_size = batch_size // jax.process_count()
per_host_eval_batch_size = eval_batch_size // jax.process_count()
train_hps = copy.copy(hps)
train_hps.unlock()
train_hps.batch_size = per_host_batch_size
fake_train_batch = get_fake_batch(train_hps)
test_hps = copy.copy(hps)
test_hps.unlock()
test_hps.batch_size = per_host_eval_batch_size
fake_test_batch = get_fake_batch(test_hps)
def train_iterator_fn():
while True:
yield fake_train_batch
def valid_epoch(epoch, num_batches=None):
del num_batches
del epoch
# Note that we do // because we do not support partial batching for the fake
# dataset.
for _ in range(hps.valid_size // eval_batch_size):
yield fake_test_batch
# pylint: disable=unreachable
def eval_train_epoch(*args, **kwargs):
del args
del kwargs
return
yield # This yield is needed to make this a valid (null) iterator.
# pylint: enable=unreachable
# pylint: disable=unreachable
def test_epoch(*args, **kwargs):
del args
del kwargs
return
yield # This yield is needed to make this a valid (null) iterator.
# pylint: enable=unreachable
return data_utils.Dataset(
train_iterator_fn, eval_train_epoch, valid_epoch, test_epoch)
| {
"content_hash": "15dc71a4b03d1b3ea0b085bf5393000a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 26.526881720430108,
"alnum_prop": 0.6858532630725578,
"repo_name": "google/init2winit",
"id": "5a4f06ac9241a8ed91d0a58922b9d7f5843fa374",
"size": "3070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "init2winit/dataset_lib/fake_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1560124"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
GRAY_SCALE_RANGE = 255
import pickle
data_filename = 'data.pkl'
print('Loading data from file \'' + data_filename + '\' ...')
with open(data_filename, 'rb') as f:
train_labels = pickle.load(f)
train_images = pickle.load(f)
test_labels = pickle.load(f)
test_images = pickle.load(f)
num_pixel = pickle.load(f)
print('Data loading complete.')
import tensorflow as tf
train_images = np.array(train_images)
train_images.resize(train_images.size // num_pixel, num_pixel)
test_images = np.array(test_images)
test_images.resize(test_images.size // num_pixel, num_pixel)
test_labels = np.array(test_labels)
train_labels = np.array(train_labels)
train_labels_ten = np.zeros((train_labels.size, 10))
test_labels_ten = np.zeros((test_labels.size, 10))
for i in range(10):
train_labels_ten[:, i] = train_labels == i
test_labels_ten[:, i] = test_labels == i
## normalization
train_images = train_images / GRAY_SCALE_RANGE
test_images = test_images / GRAY_SCALE_RANGE
x = tf.placeholder(tf.float32, [None, num_pixel])
W = tf.Variable(tf.zeros([num_pixel, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y)))
train_step = tf.train.GradientDescentOptimizer(1e-2).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
def next_batch(batch_size):
batch_idx = np.random.choice(range(train_images.shape[0]), size = batch_size, replace = False)
batch_x = np.zeros((batch_size, num_pixel))
batch_y_ = np.zeros((batch_size, 10))
for i in range(batch_size):
batch_x[i, :] = train_images[batch_idx[i], :]
batch_y_[i] = train_labels_ten[batch_idx[i]]
return batch_x, batch_y_
for i in range(5000):
batch_x, batch_y_ = next_batch(100)
sess.run(train_step, feed_dict = {x: batch_x, y_: batch_y_})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print('Accuracy:', sess.run(accuracy, feed_dict = {x: test_images, y_: test_labels_ten}))
| {
"content_hash": "8078808008d3dd2ef57849195a8940e8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 33.8,
"alnum_prop": 0.6777423759672281,
"repo_name": "Evensgn/MNIST-learning",
"id": "ed6953722cc49ebe95882f5f5a229b093be0cfb4",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnist_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17225"
}
],
"symlink_target": ""
} |
"""
Run three algorithm learning tasks: "repeat copy", "associative recall" and
"priority sort" one by one.
"""
print("Learning algorithm_learning begin:")
print("copy:")
import learning_repeat_copy_lstm
print("recall:")
import learning_associative_recall_lstm
print("sort:")
import learning_priority_sort_lstm
print("Learning algorithm_learning end.")
#
# def copy():
# learning_repeat_copy_lstm
#
#
# def recall():
# learning_associative_recall_lstm
#
#
# def sort():
# learning_priority_sort_lstm
#
#
# if __name__ == "__main__":
# print("copy:")
# copy()
# print("recall:")
# recall()
# print("sort:")
# sort()
| {
"content_hash": "2336b977e5d779d10c611f5d6ba3671d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 17.72972972972973,
"alnum_prop": 0.6417682926829268,
"repo_name": "SigmaQuan/NTM-Keras",
"id": "80a6ba2df376c82a280744fe02556506d9198583",
"size": "656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithm_learning/learning_algorithm_lstm.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "239791"
}
],
"symlink_target": ""
} |
import smbus
import time
import math
import RPi.GPIO as GPIO
import struct
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
# I2C Address of Arduino
address = 0x04
# Command Format
# digitalRead() command format header
dRead_cmd = [1]
# digitalWrite() command format header
dWrite_cmd = [2]
# analogRead() command format header
aRead_cmd = [3]
# analogWrite() command format header
aWrite_cmd = [4]
# pinMode() command format header
pMode_cmd = [5]
# Ultrasonic read
uRead_cmd = [7]
# Get firmware version
version_cmd = [8]
# Accelerometer (+/- 1.5g) read
acc_xyz_cmd = [20]
# RTC get time
rtc_getTime_cmd = [30]
# DHT Pro sensor temperature
dht_temp_cmd = [40]
# Grove LED Bar commands
# Initialise
ledBarInit_cmd = [50]
# Set orientation
ledBarOrient_cmd = [51]
# Set level
ledBarLevel_cmd = [52]
# Set single LED
ledBarSetOne_cmd = [53]
# Toggle single LED
ledBarToggleOne_cmd = [54]
# Set all LEDs
ledBarSet_cmd = [55]
# Get current state
ledBarGet_cmd = [56]
# Grove 4 Digit Display commands
# Initialise
fourDigitInit_cmd = [70]
# Set brightness, not visible until next cmd
fourDigitBrightness_cmd = [71]
# Set numeric value without leading zeros
fourDigitValue_cmd = [72]
# Set numeric value with leading zeros
fourDigitValueZeros_cmd = [73]
# Set individual digit
fourDigitIndividualDigit_cmd = [74]
# Set individual leds of a segment
fourDigitIndividualLeds_cmd = [75]
# Set left and right values with colon
fourDigitScore_cmd = [76]
# Analog read for n seconds
fourDigitAnalogRead_cmd = [77]
# Entire display on
fourDigitAllOn_cmd = [78]
# Entire display off
fourDigitAllOff_cmd = [79]
# Grove Chainable RGB LED commands
# Store color for later use
storeColor_cmd = [90]
# Initialise
chainableRgbLedInit_cmd = [91]
# Initialise and test with a simple color
chainableRgbLedTest_cmd = [92]
# Set one or more leds to the stored color by pattern
chainableRgbLedSetPattern_cmd = [93]
# set one or more leds to the stored color by modulo
chainableRgbLedSetModulo_cmd = [94]
# sets leds similar to a bar graph, reversible
chainableRgbLedSetLevel_cmd = [95]
# This allows us to be more specific about which commands contain unused bytes
unused = 0
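# Every request is a 4-byte block: [command, pin/arg1, arg2, arg3], padded with
# `unused` where a command takes fewer arguments. For example, digitalWrite(4, 1)
# below sends dWrite_cmd + [4, 1, unused], i.e. the bytes [2, 4, 1, 0], and
# analogRead(0) sends [3, 0, 0, 0] and then reads the two result bytes back.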
# Function declarations of the various functions used for encoding and sending
# data from RPi to Arduino
# Write I2C block
def write_i2c_block(address, block):
try:
return bus.write_i2c_block_data(address, 1, block)
except IOError:
print "IOError"
return -1
# Read I2C byte
def read_i2c_byte(address):
try:
return bus.read_byte(address)
except IOError:
print "IOError"
return -1
# Read I2C block
def read_i2c_block(address):
try:
return bus.read_i2c_block_data(address, 1)
except IOError:
print "IOError"
return -1
# Arduino Digital Read
def digitalRead(pin):
write_i2c_block(address, dRead_cmd + [pin, unused, unused])
time.sleep(.1)
n = read_i2c_byte(address)
return n
# Arduino Digital Write
def digitalWrite(pin, value):
write_i2c_block(address, dWrite_cmd + [pin, value, unused])
return 1
# Setting Up Pin mode on Arduino
def pinMode(pin, mode):
if mode == "OUTPUT":
write_i2c_block(address, pMode_cmd + [pin, 1, unused])
elif mode == "INPUT":
write_i2c_block(address, pMode_cmd + [pin, 0, unused])
return 1
# Read analog value from Pin
def analogRead(pin):
bus.write_i2c_block_data(address, 1, aRead_cmd + [pin, unused, unused])
#time.sleep(.001)
bus.read_byte(address)
number = bus.read_i2c_block_data(address, 1)
return number[1] * 256 + number[2]
# Write PWM
def analogWrite(pin, value):
write_i2c_block(address, aWrite_cmd + [pin, value, unused])
return 1
# Read temp in Celsius from Grove Temperature Sensor
def temp(pin, model = '1.0'):
# each of the sensor revisions uses a different thermistor, each with its own B value constant
if model == '1.2':
bValue = 4250 # sensor v1.2 uses thermistor ??? (assuming NCP18WF104F03RC until SeeedStudio clarifies)
elif model == '1.1':
bValue = 4250 # sensor v1.1 uses thermistor NCP18WF104F03RC
else:
bValue = 3975 # sensor v1.0 uses thermistor TTC3A103*39H
a = analogRead(pin)
resistance = (float)(1023 - a) * 10000 / a
t = (float)(1 / (math.log(resistance / 10000) / bValue + 1 / 298.15) - 273.15)
return t
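# Worked example (a sketch, for the default v1.0 sensor with bValue = 3975):
# an analogRead() of 512 gives resistance = (1023 - 512) * 10000 / 512 ~= 9980 ohm,
# so t = 1 / (log(9980/10000)/3975 + 1/298.15) - 273.15 ~= 25.0 deg C.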
# Read value from Grove Ultrasonic
def ultrasonicRead(pin):
write_i2c_block(address, uRead_cmd + [pin, unused, unused])
time.sleep(.2)
read_i2c_byte(address)
number = read_i2c_block(address)
return (number[1] * 256 + number[2])
# Read the firmware version
def version():
write_i2c_block(address, version_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return "%s.%s.%s" % (number[1], number[2], number[3])
# Read Grove Accelerometer (+/- 1.5g) XYZ value
def acc_xyz():
write_i2c_block(address, acc_xyz_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
if number[1] > 32:
number[1] = - (number[1] - 224)
if number[2] > 32:
number[2] = - (number[2] - 224)
if number[3] > 32:
number[3] = - (number[3] - 224)
return (number[1], number[2], number[3])
# Read from Grove RTC
def rtc_getTime():
write_i2c_block(address, rtc_getTime_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return number
# Read and return temperature and humidity from Grove DHT Pro
def dht(pin, module_type):
write_i2c_block(address, dht_temp_cmd + [pin, module_type, unused])
# Delay necessary for proper reading from DHT sensor
time.sleep(.6)
try:
read_i2c_byte(address)
number = read_i2c_block(address)
if number == -1:
return -1
except (TypeError, IndexError):
return -1
# data returned in IEEE format as a float in 4 bytes
f = 0
# data is reversed
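# Worked example (hypothetical sensor bytes): if number[1:5] == [0x00, 0x00, 0xBC, 0x41],
# the reversed bytes concatenate to the hex string '41bc0000', and
# struct.unpack('!f', '41bc0000'.decode('hex'))[0] == 23.5 (deg C).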
for element in reversed(number[1:5]):
# Converted to hex
hex_val = hex(element)
#print hex_val
try:
h_val = hex_val[2] + hex_val[3]
except IndexError:
h_val = '0' + hex_val[2]
# Convert to char array
if f == 0:
h = h_val
f = 1
else:
h = h + h_val
# convert the temp back to float
t = round(struct.unpack('!f', h.decode('hex'))[0], 2)
h = ''
# data is reversed
for element in reversed(number[5:9]):
# Converted to hex
hex_val = hex(element)
# Print hex_val
try:
h_val = hex_val[2] + hex_val[3]
except IndexError:
h_val = '0' + hex_val[2]
# Convert to char array
if f == 0:
h = h_val
f = 1
else:
h = h + h_val
# convert back to float
hum = round(struct.unpack('!f', h.decode('hex'))[0], 2)
return [t, hum]
# Grove LED Bar - initialise
# orientation: (0 = red to green, 1 = green to red)
def ledBar_init(pin, orientation):
write_i2c_block(address, ledBarInit_cmd + [pin, orientation, unused])
return 1
# Grove LED Bar - set orientation
# orientation: (0 = red to green, 1 = green to red)
def ledBar_orientation(pin, orientation):
write_i2c_block(address, ledBarOrient_cmd + [pin, orientation, unused])
return 1
# Grove LED Bar - set level
# level: (0-10)
def ledBar_setLevel(pin, level):
write_i2c_block(address, ledBarLevel_cmd + [pin, level, unused])
return 1
# Grove LED Bar - set single led
# led: which led (1-10)
# state: off or on (0-1)
def ledBar_setLed(pin, led, state):
write_i2c_block(address, ledBarSetOne_cmd + [pin, led, state])
return 1
# Grove LED Bar - toggle single led
# led: which led (1-10)
def ledBar_toggleLed(pin, led):
write_i2c_block(address, ledBarToggleOne_cmd + [pin, led, unused])
return 1
# Grove LED Bar - set all leds
# state: (0-1023) or (0x00-0x3FF) or (0b0000000000-0b1111111111) or (int('0000000000',2)-int('1111111111',2))
def ledBar_setBits(pin, state):
byte1 = state & 255
byte2 = state >> 8
write_i2c_block(address, ledBarSet_cmd + [pin, byte1, byte2])
return 1
# Grove LED Bar - get current state
# state: (0-1023) a bit for each of the 10 LEDs
def ledBar_getBits(pin):
write_i2c_block(address, ledBarGet_cmd + [pin, unused, unused])
time.sleep(.2)
read_i2c_byte(0x04)
block = read_i2c_block(0x04)
return block[1] ^ (block[2] << 8)
# Grove 4 Digit Display - initialise
def fourDigit_init(pin):
write_i2c_block(address, fourDigitInit_cmd + [pin, unused, unused])
return 1
# Grove 4 Digit Display - set numeric value with or without leading zeros
# value: (0-65535) or (0000-FFFF)
def fourDigit_number(pin, value, leading_zero):
# split the value into two bytes so we can render 0000-FFFF on the display
byte1 = value & 255
byte2 = value >> 8
# separate commands to overcome current 4 bytes per command limitation
if (leading_zero):
write_i2c_block(address, fourDigitValue_cmd + [pin, byte1, byte2])
else:
write_i2c_block(address, fourDigitValueZeros_cmd + [pin, byte1, byte2])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set brightness
# brightness: (0-7)
def fourDigit_brightness(pin, brightness):
# not actually visible until next command is executed
write_i2c_block(address, fourDigitBrightness_cmd + [pin, brightness, unused])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set individual segment (0-9,A-F)
# segment: (0-3)
# value: (0-15) or (0-F)
def fourDigit_digit(pin, segment, value):
write_i2c_block(address, fourDigitIndividualDigit_cmd + [pin, segment, value])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set 7 individual leds of a segment
# segment: (0-3)
# leds: (0-255) or (0-0xFF) one bit per led, segment 2 is special, 8th bit is the colon
def fourDigit_segment(pin, segment, leds):
write_i2c_block(address, fourDigitIndividualLeds_cmd + [pin, segment, leds])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set left and right values (0-99), with leading zeros and a colon
# left: (0-255) or (0-FF)
# right: (0-255) or (0-FF)
# colon will be lit
def fourDigit_score(pin, left, right):
write_i2c_block(address, fourDigitScore_cmd + [pin, left, right])
time.sleep(.05)
return 1
# Grove 4 Digit Display - display analogRead value for n seconds, 4 samples per second
# analog: analog pin to read
# duration: analog read for this many seconds
def fourDigit_monitor(pin, analog, duration):
write_i2c_block(address, fourDigitAnalogRead_cmd + [pin, analog, duration])
time.sleep(duration + .05)
return 1
# Grove 4 Digit Display - turn entire display on (88:88)
def fourDigit_on(pin):
write_i2c_block(address, fourDigitAllOn_cmd + [pin, unused, unused])
time.sleep(.05)
return 1
# Grove 4 Digit Display - turn entire display off
def fourDigit_off(pin):
write_i2c_block(address, fourDigitAllOff_cmd + [pin, unused, unused])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - store a color for later use
# red: 0-255
# green: 0-255
# blue: 0-255
def storeColor(red, green, blue):
write_i2c_block(address, storeColor_cmd + [red, green, blue])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - initialise
# numLeds: how many leds do you have in the chain
def chainableRgbLed_init(pin, numLeds):
write_i2c_block(address, chainableRgbLedInit_cmd + [pin, numLeds, unused])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - initialise and test with a simple color
# numLeds: how many leds do you have in the chain
# testColor: (0-7) 3 bits in total - a bit for red, green and blue, eg. 0x04 == 0b100 (0bRGB) == rgb(255, 0, 0) == #FF0000 == red
# ie. 0 black, 1 blue, 2 green, 3 cyan, 4 red, 5 magenta, 6 yellow, 7 white
def chainableRgbLed_test(pin, numLeds, testColor):
write_i2c_block(address, chainableRgbLedTest_cmd + [pin, numLeds, testColor])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - set one or more leds to the stored color by pattern
# pattern: (0-3) 0 = this led only, 1 all leds except this led, 2 this led and all leds inwards, 3 this led and all leds outwards
# whichLed: index of led you wish to set counting outwards from the GrovePi, 0 = led closest to the GrovePi
def chainableRgbLed_pattern(pin, pattern, whichLed):
write_i2c_block(address, chainableRgbLedSetPattern_cmd + [pin, pattern, whichLed])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - set one or more leds to the stored color by modulo
# offset: index of led you wish to start at, 0 = led closest to the GrovePi, counting outwards
# divisor: when 1 (default) sets stored color on all leds >= offset, when 2 sets every 2nd led >= offset and so on
def chainableRgbLed_modulo(pin, offset, divisor):
write_i2c_block(address, chainableRgbLedSetModulo_cmd + [pin, offset, divisor])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - sets leds similar to a bar graph, reversible
# level: (0-10) the number of leds you wish to set to the stored color
# reverse: (0-1) when 0, counting outwards from the GrovePi (0 = led closest to the GrovePi), otherwise counting inwards
def chainableRgbLed_setLevel(pin, level, reverse):
write_i2c_block(address, chainableRgbLedSetLevel_cmd + [pin, level, reverse])
time.sleep(.05)
return 1
| {
"content_hash": "7d7ab1c4aaaa1b1d173ffb017ab5bee8",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 129,
"avg_line_length": 30.140589569160998,
"alnum_prop": 0.6841709298826362,
"repo_name": "martinschaef/grovepi",
"id": "40ddd5fd7e73b7d8a9d8102a8db3b467aae29988",
"size": "13742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grovepi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18492"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 0059 add show_banner_text
Revises: 0058 set all has_banner_text
Create Date: 2021-10-04 00:10:14.535185
"""
# revision identifiers, used by Alembic.
revision = '0059 add show_banner_text'
down_revision = '0058 set all has_banner_text'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('events',
sa.Column(
'show_banner_text',
sa.Boolean(),
nullable=True,
server_default="True"
)
)
op.drop_column('events', 'has_banner_text')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('events', sa.Column('has_banner_text', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_column('events', 'show_banner_text')
# ### end Alembic commands ###
| {
"content_hash": "3686eac18ca8c5bd784315bcaf264feb",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 107,
"avg_line_length": 26.314285714285713,
"alnum_prop": 0.6482084690553745,
"repo_name": "NewAcropolis/api",
"id": "f7adc3750332a50ed32b0df1afc7f5cc56b76813",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/0059.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10421"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "791740"
},
{
"name": "Shell",
"bytes": "66108"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import get_user_model
import json
from readux.annotations.models import Annotation
class Command(BaseCommand):
'''Import a JSON file of annotation data in the format provided
by the annotator store API (i.e., search results) and create
corresponding local annotations for them.
'''
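# The expected input shape, inferred from what handle()/import_annotation()
# read below (field names and values here are purely illustrative):
#
# {
#   "rows": [
#     {
#       "id": "some-uuid",
#       "created": "2015-01-01T00:00:00Z",
#       "updated": "2015-01-01T00:00:00Z",
#       "user": "alice",
#       "text": "an annotation"
#     }
#   ]
# }
#
# Keys listed in Annotation.common_fields are set directly on the model;
# anything else ends up in extra_data.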
def add_arguments(self, parser):
parser.add_argument('file',
help='JSON file with annotation data')
def handle(self, *args, **options):
print options['file']
with open(options['file']) as datafile:
data = json.loads(datafile.read())
for annotation in data['rows']:
self.import_annotation(annotation)
def import_annotation(self, data):
'''Create and save a new annotation, setting fields based on a
dictionary of data passed in. Raises an error if an annotation
author is not found as a user in the database.'''
note = Annotation()
# NOTE: because we are using uuid for annotation id field,
# importing an annotation twice does not error, but simply
# replaces the old copy. Might want to add checks for this...
# required fields that should always be present
# (not normally set by user)
for field in ['updated', 'created', 'id']:
setattr(note, field, data[field])
del data[field]
# user is special: annotation data only includes username,
# but we need a user object
# NOTE: this could result in making one person's annotations
# available to someone else, if someone is using a different
# username in another instance
if 'user' in data:
try:
note.user = get_user_model().objects.get(username=data['user'])
del data['user']
except get_user_model().DoesNotExist:
raise CommandError('Cannot import annotations for user %s (does not exist)' % data['user'])
for field in Annotation.common_fields:
if field in data:
setattr(note, field, data[field])
del data[field]
# put any other data that is left in extra data json field
if data:
note.extra_data.update(data)
note.save()
| {
"content_hash": "1fc68d1a4231c2b360c98364a529858b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 107,
"avg_line_length": 38.704918032786885,
"alnum_prop": 0.626853028377806,
"repo_name": "emory-libraries/readux",
"id": "2076f85f36e48324902c18f6b2a587da1153e28b",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readux/annotations/management/commands/import_annotations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110298"
},
{
"name": "HTML",
"bytes": "82431"
},
{
"name": "JavaScript",
"bytes": "666176"
},
{
"name": "Python",
"bytes": "553514"
},
{
"name": "XSLT",
"bytes": "13269"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import argparse
import numpy as np
import matplotlib
import multiprocessing
import logging
matplotlib.use('agg')
import matplotlib.pyplot as plt
from pychemia.code.abinit import AbinitInput, AbinitOutput
from pychemia.population.orbitaldftu import dmatpawu2params, params2dmatpawu, OrbitalDFTU
from pychemia.population.orbitaldftu import get_final_abinit_out
from pychemia.searcher import FireFly
from pychemia.db import get_database
def compare_params(path):
if not os.path.isfile(path + os.sep + 'abinit.in'):
print('ERROR: No abinit.in found at %s' % path)
return
# For making easier to see the values
np.set_printoptions(linewidth=200, suppress=True)
# Reading the INPUT
abi = AbinitInput(path + os.sep + 'abinit.in')
if 'lpawu' not in abi.variables:
raise ValueError("Variable lpawu not found")
ndim = 2 * max(abi['lpawu']) + 1
idmatpawu = np.array(abi['dmatpawu']).reshape(-1, ndim, ndim)
iparams = dmatpawu2params(idmatpawu, ndim)
# Reading the OUTPUT
abinitout = get_final_abinit_out(path)
abo = AbinitOutput(abinitout)
dmatpawu = abo.get_final_dmatpawu()
odmatpawu = np.array(dmatpawu).reshape(-1, ndim, ndim)
oparams = dmatpawu2params(odmatpawu, ndim)
print('DMATPAWU')
print('input')
print(idmatpawu)
print('output')
print(odmatpawu)
print('PARAMETRIC REPRESENTATION found at %s' % abinitout)
for i in sorted(list(iparams.keys())):
print(i)
print('input')
print(iparams[i])
print('output')
print(i)
print(oparams[i])
abo = AbinitOutput(abinitout)
if not abo.is_finished:
print('This output is not finished')
try:
nres2 = abo.get_energetics()['nres2'][-1]
etot = abo.get_energetics()['etot'][-1]
nscf = len(abo.get_energetics()['etot'])
print("%30s ETOT: %15.6f NRES2: %15.6e NumSCF: %3d" % (path, etot, nres2, nscf))
except:
print("ERROR: Could not get energetics from %s" % abinitout)
def check_status(basepath):
dirs = [x for x in os.listdir(basepath)
if os.path.isdir(basepath + os.sep + x) and os.path.isfile(basepath + os.sep + x + os.sep + 'abinit.in')]
print("%-40s %15s %15s %4s" % ("ABINIT output", "ETOT", 'nres2', 'nSCF'))
for i in dirs:
path = basepath + os.sep + i
abinitout = get_final_abinit_out(path)
if abinitout is None:
continue
abo = AbinitOutput(abinitout)
if not abo.is_finished:
continue
try:
nres2 = abo.get_energetics()['nres2'][-1]
etot = abo.get_energetics()['etot'][-1]
nscf = len(abo.get_energetics()['etot'])
print("%-40s %15.6f %15.6e %4d" % (abinitout, etot, nres2, nscf))
except:
print("ERROR: Could not get final energetics from %s" % abinitout)
def plot_polar(popu, basepath):
print('Plotting Euler Angles...')
dirs = [x for x in os.listdir(basepath)
if os.path.isdir(basepath + os.sep + x) and os.path.isfile(basepath + os.sep + x + os.sep + 'abinit.in')]
fig = plt.figure(figsize=(21, 1.2 * len(dirs)))
plt.subplots_adjust(left=0.0, bottom=0.0, right=0.99, top=0.99, wspace=None, hspace=None)
etots = []
for idir in dirs:
pm, etot = popu.get_final_properties(basepath + os.sep + idir)
etots.append(etot)
etots = np.array(etots)
sort_dirs = np.array(dirs)[etots.argsort()]
index = 0.0
for idir in sort_dirs:
pm, etot = popu.get_final_properties(basepath + os.sep + idir)
ea = np.array(pm['final_dmat']['euler_angles'])
# print(idir,etot)
# print(ea.shape)
nangles = ea.shape[1]
for j in range(nangles):
theta = ea[:, j]
dim = len(theta)
radii = np.ones(dim)
colors = np.arange(dim)
width = 0.1 * np.ones(dim)
ax = fig.add_axes([float(j) / nangles, index / (len(dirs) + 1), 1.0 / nangles, 1.0 / nangles],
projection='polar')
ax.yaxis.set_tick_params(labelsize=0)
ax.xaxis.set_tick_params(labelsize=0)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.spines['polar'].set_visible(True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(colors, bars):
bar.set_facecolor(plt.cm.viridis(float(r) / nangles))
bar.set_alpha(0.9)
index += 1.0
plt.savefig('OrbitalPolar.pdf')
plt.show()
def plot_status(basepath):
dirs = [x for x in os.listdir(basepath)
if os.path.isdir(basepath + os.sep + x) and os.path.isfile(basepath + os.sep + x + os.sep + 'abinit.in')]
abi = AbinitInput(basepath + os.sep + 'abinit.in')
nstep = abi['nstep']
print('Plotting Status...')
xx = {}
yy = {}
# Get the data:
max_nruns = 0
for path in dirs:
xx[path] = np.array([])
yy[path] = {}
yy[path]['nres2'] = np.array([])
yy[path]['etot'] = np.array([])
yy[path]['delta'] = np.array([])
for i in range(100):
if os.path.isfile(basepath + os.sep + path + os.sep + 'abinit_%02d.txt' % i):
abo = AbinitOutput(basepath + os.sep + path + os.sep + 'abinit_%02d.txt' % i)
if not abo.is_finished:
print('This output is not finished')
continue
if max_nruns < i:
max_nruns = i
try:
energ = abo.get_energetics()
except:
raise RuntimeError(
"Failed procesing: %s" % (basepath + os.sep + path + os.sep + 'abinit_%02d.txt' % i))
nres2 = energ['nres2'][-1]
etot = energ['etot'][-1]
nscf = len(energ['etot'])
x = np.arange(nstep * i, nstep * i + len(energ['nres2']))
yetot = np.array(energ['etot'])
ynres2 = np.array(energ['nres2'])
ydelta = np.array(np.abs(energ['deltaEh']))
xx[path] = np.concatenate((xx[path], x))
yy[path]['etot'] = np.concatenate((yy[path]['etot'], yetot))
yy[path]['nres2'] = np.concatenate((yy[path]['nres2'], ynres2))
yy[path]['delta'] = np.concatenate((yy[path]['delta'], ydelta))
print("%s ETOT:%15.6f NRES2=%15.6e Num SCF=%3d" % (path, etot, nres2, nscf))
# RESIDUAL
plt.figure(figsize=(8, 11))
miny = 1E-1
maxy = 1E-16
plt.subplots_adjust(left=0.1, bottom=0.05, right=0.99, top=0.99,
wspace=None, hspace=None)
for path in dirs:
if len(xx) > 0:
plt.semilogy(xx[path], yy[path]['nres2'], label=path[-4:], lw=0.1)
if miny > min(yy[path]['nres2']):
miny = min(yy[path]['nres2'])
if maxy < max(yy[path]['nres2']):
maxy = max(yy[path]['nres2'])
plt.ylim(miny, maxy)
for i in nstep * np.arange(max_nruns + 1):
plt.semilogy([i, i], [miny, maxy], '0.5', lw=0.1)
plt.xlim(0, max([max(xx[path]) for path in dirs]))
plt.legend()
plt.xlabel("SCF iteration")
plt.ylabel("Density Residual$^2$")
plt.savefig('Orbital_Residual.pdf')
# ETOT
miny = 1E6
maxy = -1E6
avg = 0
minlen = 100
plt.figure(figsize=(8, 11))
for path in dirs:
if len(xx) > 0:
plt.plot(xx[path], yy[path]['etot'], label=path[-4:])
if len(yy[path]['etot']) < minlen:
minlen = len(yy[path]['etot'])
avg = np.average(yy[path]['etot'][-int(minlen / 2):])
if miny > min(yy[path]['etot'][-int(minlen / 2):]):
miny = min(yy[path]['etot'][-int(minlen / 2):])
if maxy < max(yy[path]['etot'][-int(minlen / 2):]):
maxy = max(yy[path]['etot'][-int(minlen / 2):])
plt.subplots_adjust(left=0.15, bottom=0.05, right=0.95, top=0.95,
wspace=None, hspace=None)
newminy = miny - 0.1 * (maxy - miny)
newmaxy = maxy + 0.1 * (maxy - miny)
miny = newminy
maxy = newmaxy
plt.ylim(miny, maxy)
for i in nstep * np.arange(max_nruns + 1):
plt.plot([i, i], [miny, maxy], '0.5', lw=0.1)
plt.xlim(0, max([max(xx[path]) for path in dirs]))
plt.legend()
plt.xlabel("SCF iteration")
plt.ylabel("Energy")
plt.savefig('Orbital_ETotal.pdf')
# deltaEh
plt.figure(figsize=(8, 11))
miny = 1E-1
for path in dirs:
if len(xx) > 0:
plt.semilogy(xx[path], yy[path]['delta'], label=path[-4:], lw=0.1)
if miny > min(yy[path]['delta']):
miny = min(yy[path]['delta'])
plt.subplots_adjust(left=0.1, bottom=0.05, right=0.99, top=0.99,
wspace=None, hspace=None)
for i in nstep * np.arange(max_nruns + 1):
plt.semilogy([i, i], [miny, 1E3], '0.5', lw=0.1)
plt.ylim(miny, 1E3)
plt.xlim(0, max([max(xx[path]) for path in dirs]))
plt.legend()
plt.xlabel("SCF iteration")
plt.ylabel("Delta Energy")
plt.savefig('Orbital_deltaEh.pdf')
def create_population(num_candidates):
popu.random_population(num_candidates)
def safe_read_json(filename):
"""
Safely read a given filename, extract and returns the associated dictionary
from the JSON file
"""
if not os.path.exists(filename):
raise ValueError("ERROR: Could not read file: %s" % filename)
rf = open(filename)
try:
data = json.load(rf)
except ValueError:
raise ValueError("ERROR: File is not in proper JSON format: %s" % filename)
rf.close()
return data
def prepare_folders(scrpath):
for i in popu.members:
popu.prepare_folder(i, workdir=scrpath, source_dir=scrpath)
def set_populations(parser):
return parser.add_argument('-p', type=str, nargs='+', required=True, metavar='JSON_file',
help='Population settings (JSON file), includes settings to connect to server and the '
'population')
def evaluate(db_settings, queue_settings, abipath):
pcdb = get_database(db_settings)
print("[%s] Path for abinit.in: %s" % (pcdb.name, abipath))
popu = OrbitalDFTU(pcdb, abipath + os.sep + 'abinit.in')
popu.evaluator(queue_settings, abipath)
def search(db_settings, search_settings, abipath):
pcdb = get_database(db_settings)
print("[%s] Path for abinit.in: %s" % (pcdb.name, abipath))
popu = OrbitalDFTU(pcdb, abipath + os.sep + 'abinit.in')
if 'generation_size' in search_settings:
generation_size = search_settings.pop('generation_size')
else:
generation_size = 32
if 'stabilization_limit' in search_settings:
stabilization_limit = search_settings.pop('stabilization_limit')
else:
stabilization_limit = 10
fire = FireFly(popu, params=search_settings, generation_size=generation_size,
stabilization_limit=stabilization_limit)
fire.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Orbital DFTU Evaluator and Analysis Tool')
subparsers = parser.add_subparsers(help='commands', dest='subparser_name')
# The create command
create_parser = subparsers.add_parser('create', help='Create the database')
set_populations(create_parser)
# The populate command
populate_parser = subparsers.add_parser('populate', help='Add candidates to the population (used for testing)')
set_populations(populate_parser)
populate_parser.add_argument('-clean', action='store_true',
help='If true, clean the entire database before populate',
required=False, default=False)
populate_parser.add_argument('-size', type=int,
help='Number of candidates to populate (default: 16)',
required=False, default=16)
populate_parser.add_argument('-basepath', type=str,
help='Path where calculations are performed',
required=False, default='.')
# A run command
run_parser = subparsers.add_parser('run', help='Run Evaluator')
set_populations(run_parser)
run_parser.add_argument('-queue_settings', type=str,
help='Filename with PBS settings for launching jobs',
required=False, default='queue.json')
run_parser.add_argument('-basepath', type=str,
help='Path where calculations are performed (default: current folder)',
required=False, default='.')
# A searcher command
searcher_parser = subparsers.add_parser('search', help='Run PyChemia Global Searcher')
set_populations(searcher_parser)
searcher_parser.add_argument('-search_settings', type=str,
help='Filename with PBS settings for launching jobs',
required=False, default='searcher.json')
searcher_parser.add_argument('-basepath', type=str,
help='Path where calculations are performed (default: current folder)',
required=False, default='.')
# The plot command
plot_parser = subparsers.add_parser('plot', help='Generate several plots')
set_populations(plot_parser)
plot_parser.add_argument('-basepath', type=str,
help='Path where calculations are performed',
required=False, default='.')
args = parser.parse_args()
print(args)
# check all settings in args.p
dbs = []
for dbi_file in args.p:
dbi = safe_read_json(dbi_file)
print("DB settings: %s" % dbi)
assert('name' in dbi)
assert('u' in dbi)
assert('j' in dbi)
dbs.append(dbi)
if args.subparser_name == 'create':
pcdbs = []
for dbi in dbs:
pcdb = get_database(dbi)
pcdbs.append(pcdb)
print(pcdb)
print(pcdb.entries.count())
if args.subparser_name == 'run':
queue_settings = safe_read_json(args.queue_settings)
print("PBS settings: %s" % queue_settings)
# General actions for 'populate', 'run', 'search' and 'plot'
if args.subparser_name in ['run', 'plot', 'populate', 'search']:
if not os.path.isdir(args.basepath) or not os.path.isfile(args.basepath + os.sep + 'abinit.in'):
print('ERROR: Wrong basepath %s, directory must exist and contain a abinit.in file' % args.basepath)
parser.print_help()
sys.exit(1)
popu = {}
for dbi in dbs:
name = dbi['name']
pcdb = get_database(dbi)
if not os.path.isdir(args.basepath + os.sep + name):
os.mkdir(args.basepath + os.sep + name)
abi = AbinitInput(args.basepath + os.sep + 'abinit.in')
abi['upawu'] = ""
for i in dbi['u']:
abi['upawu'] += str(i) + " "
abi['upawu'] += 'eV'
abi['jpawu'] = ""
for i in dbi['j']:
abi['jpawu'] += str(i) + " "
abi['jpawu'] += 'eV'
abipath = args.basepath + os.sep + name + os.sep + 'abinit.in'
abi.write(abipath)
abifiles = args.basepath + os.sep + name + os.sep + 'abinit.files'
if os.path.lexists(abifiles):
os.remove(abifiles)
os.symlink(os.path.abspath(args.basepath + os.sep + 'abinit.files'), abifiles)
for i in [x for x in os.listdir(args.basepath) if x[-3:] == 'xml']:
psp = args.basepath + os.sep + name + os.sep + i
if os.path.lexists(psp):
os.remove(psp)
os.symlink(os.path.abspath(args.basepath + os.sep + i), psp)
popu[name] = OrbitalDFTU(pcdb, abipath)
# Specific actions for 'populate', 'run', 'search' and 'plot'
if args.subparser_name == 'populate':
for dbi in dbs:
name = dbi['name']
if args.clean:
popu[name].clean()
popu[name].random_population(args.size)
print("[%s] Total number of candidates: %d" % (name, len(popu[name])))
elif args.subparser_name == 'run':
sprocs = {}
for dbi in dbs:
name = dbi['name']
basepath = args.basepath + os.sep + name
if os.path.exists(basepath + os.sep + 'ERROR'):
print('ERROR: Something was wrong with %s' % basepath)
else:
sprocs[name] = multiprocessing.Process(target=evaluate, args=(dbi, queue_settings, basepath))
sprocs[name].start()
for dbi in dbs:
name = dbi['name']
sprocs[name].join()
elif args.subparser_name == 'search':
logging.basicConfig(level=logging.DEBUG)
search_settings = safe_read_json(args.search_settings)
print("Search settings from file: %s \n%s" % (args.search_settings, search_settings))
sprocs = {}
for dbi in dbs:
name = dbi['name']
basepath = args.basepath + os.sep + name
sprocs[name] = multiprocessing.Process(target=search, args=(dbi, search_settings, basepath))
sprocs[name].start()
print(list(sprocs.keys()))
for dbi in dbs:
name = dbi['name']
if name not in sprocs:
print('ERROR: %s' % str(sprocs))
sprocs[name].join()
elif args.subparser_name == 'plot':
check_status(args.basepath)
plot_status(args.basepath)
plot_polar(popu, args.basepath)
| {
"content_hash": "5347e61dc7a58622ed05d595dc44fe8c",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 118,
"avg_line_length": 35.577603143418465,
"alnum_prop": 0.5584515986526036,
"repo_name": "MaterialsDiscovery/PyChemia",
"id": "b3d83d5ed9fbc7f28a1807f76ab08294ed0d311c",
"size": "18132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/OrbitalDFTU_Evaluator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1390398"
},
{
"name": "Shell",
"bytes": "325"
}
],
"symlink_target": ""
} |
"""
This module contains a class for discrete
1-dimensional exponential families. The main
uses for this class are exact (post-selection)
hypothesis tests and confidence intervals.
"""
import numpy as np
import warnings
from ..truncated.api import find_root
def crit_func(test_statistic, left_cut, right_cut):
"""
A generic critical function for an interval,
with weights at the endpoints.
((test_statistic < CL) + (test_statistic > CR) +
gammaL * (test_statistic == CL) +
gammaR * (test_statistic == CR))
where (CL, gammaL) = left_cut, (CR, gammaR) = right_cut.
Parameters
----------
test_statistic : np.float
Observed value of test statistic.
left_cut : (float, float)
(CL, gammaL): left endpoint and value at exactly the left endpoint (should be in [0,1]).
right_cut : (float, float)
(CR, gammaR): right endpoint and value at exactly the right endpoint (should be in [0,1]).
Returns
-------
decision : np.float
"""
CL, gammaL = left_cut
CR, gammaR = right_cut
value = ((test_statistic < CL) + (test_statistic > CR)) * 1.
if gammaL != 0:
value += gammaL * (test_statistic == CL)
if gammaR != 0:
value += gammaR * (test_statistic == CR)
return value
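# For instance (a small sketch): with left_cut = (1, 0.2) and right_cut = (5, 0.7),
# crit_func(0, ...) == 1.0 (outside the interval), crit_func(3, ...) == 0.0 (inside),
# crit_func(1, ...) == 0.2 and crit_func(5, ...) == 0.7 (the randomized endpoints).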
class discrete_family(object):
def __init__(self, sufficient_stat, weights, theta=0.):
r"""
A discrete 1-dimensional
exponential family with reference measure $\sum_j w_j \delta_{X_j}$
and sufficient statistic `sufficient_stat`. For any $\theta$, the distribution
is
.. math::
P_{\theta} = \sum_{j} e^{\theta X_j - \Lambda(\theta)} w_j \delta_{X_j}
where
.. math::
\Lambda(\theta) = \log \left(\sum_j w_j e^{\theta X_j} \right).
Parameters
----------
sufficient_stat : `np.float((n))`
weights : `np.float(n)`
Notes
-----
The weights are normalized to sum to 1.
"""
xw = np.array(sorted(zip(sufficient_stat, weights)), np.float)
self._x = xw[:,0]
self._w = xw[:,1]
self._lw = np.array([np.log(v) for v in xw[:,1]])
self._w /= self._w.sum() # make sure they are a pmf
self.n = len(xw)
self._theta = np.nan
self.theta = theta
@property
def theta(self):
"""
The natural parameter of the family.
"""
return self._theta
@theta.setter
def theta(self, _theta):
if _theta != self._theta:
_thetaX = _theta * self.sufficient_stat + self._lw
_largest = _thetaX.max() - 5 # try to avoid over/under flow, 5 seems arbitrary
_exp_thetaX = np.exp(_thetaX - _largest)
_prod = _exp_thetaX
self._partition = np.sum(_prod)
self._pdf = _prod / self._partition
self._partition *= np.exp(_largest)
self._theta = _theta
@property
def partition(self):
r"""
Partition function at `self.theta`:
.. math::
\sum_j e^{\theta X_j} w_j
"""
if hasattr(self, "_partition"):
return self._partition
@property
def sufficient_stat(self):
"""
Sufficient statistics of the exponential family.
"""
return self._x
@property
def weights(self):
"""
Weights of the exponential family.
"""
return self._w
def pdf(self, theta):
r"""
Density of $P_{\theta}$ with respect to $P_0$.
Parameters
----------
theta : float
Natural parameter.
Returns
-------
pdf : np.float
"""
self.theta = theta # compute partition if necessary
return self._pdf
def cdf(self, theta, x=None, gamma=1):
r"""
The cumulative distribution function of $P_{\theta}$ with
weight `gamma` at `x`
.. math::
P_{\theta}(X < x) + \gamma * P_{\theta}(X = x)
Parameters
----------
theta : float
Natural parameter.
x : float (optional)
Where to evaluate CDF.
gamma : float(optional)
Weight given at `x`.
Returns
-------
cdf : np.float
"""
pdf = self.pdf(theta)
if x is None:
return np.cumsum(pdf) - pdf * (1 - gamma)
else:
tr = np.sum(pdf * (self.sufficient_stat < x))
if x in self.sufficient_stat:
tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
return tr
def ccdf(self, theta, x=None, gamma=0, return_unnorm=False):
r"""
The complementary cumulative distribution function
(i.e. survival function) of $P_{\theta}$ with
weight `gamma` at `x`
.. math::
P_{\theta}(X > x) + \gamma * P_{\theta}(X = x)
Parameters
----------
theta : float
Natural parameter.
x : float (optional)
Where to evaluate CCDF.
gamma : float(optional)
Weight given at `x`.
Returns
-------
ccdf : np.float
"""
pdf = self.pdf(theta)
if x is None:
return np.cumsum(pdf[::-1])[::-1] - pdf * (1 - gamma)
else:
tr = np.sum(pdf * (self.sufficient_stat > x))
if x in self.sufficient_stat:
tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
return tr
def E(self, theta, func):
r"""
Expectation of `func` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func : callable
Assumed to be vectorized.
gamma : float(optional)
Weight given at `x`.
Returns
-------
E : np.float
"""
T = np.asarray(func(self.sufficient_stat))
pdf_ = self.pdf(theta)
if T.ndim == 1:
return (T * pdf_).sum()
else:
val = (T * pdf_[:,None]).sum(0)
return val
def Var(self, theta, func):
r"""
Variance of `func` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func : callable
Assumed to be vectorized.
Returns
-------
var : np.float
"""
mu = self.E(theta, func)
return self.E(theta, lambda x: (func(x)-mu)**2)
def Cov(self, theta, func1, func2):
r"""
Covariance of `func1` and `func2` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func1, func2 : callable
Assumed to be vectorized.
Returns
-------
cov : np.float
"""
mu1 = self.E(theta, func1)
mu2 = self.E(theta, func2)
return self.E(theta, lambda x: (func1(x)-mu1)*(func2(x)-mu2))
def two_sided_acceptance(self, theta, alpha=0.05, tol=1e-6):
r"""
Compute cutoffs of UMPU two-sided test.
Parameters
----------
theta : float
Natural parameter.
alpha : float (optional)
Size of two-sided test.
tol : float
Tolerance for root-finding.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
if theta != self._theta:
CL = np.max([x for x in self.sufficient_stat if self._critCovFromLeft(theta, (x, 0), alpha) >= 0])
gammaL = find_root(lambda x: self._critCovFromLeft(theta, (CL, x), alpha), 0., 0., 1., tol)
CR, gammaR = self._rightCutFromLeft(theta, (CL, gammaL), alpha)
self._left_cut, self._right_cut = (CL, gammaL), (CR, gammaR)
return self._left_cut, self._right_cut
def two_sided_test(self, theta0, observed, alpha=0.05, randomize=True, auxVar=None):
r"""
Perform UMPU two-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
if randomize:
if auxVar is None:
auxVar = np.random.random()
rejLeft = self._test2RejectsLeft(theta0, observed, alpha, auxVar)
rejRight = self._test2RejectsRight(theta0, observed, alpha, auxVar)
else:
rejLeft = self._test2RejectsLeft(theta0, observed, alpha)
rejRight = self._test2RejectsRight(theta0, observed, alpha)
return rejLeft or rejRight
def one_sided_test(self, theta0, observed, alternative='greater', alpha=0.05, randomize=True, auxVar=None):
r"""
Perform UMPU one-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alternative : str
One of ['greater', 'less']
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
if alternative not in ['greater', 'less']:
raise ValueError('alternative must be one of ["greater", "less"]')
self.theta = theta0
if randomize:
if auxVar is None:
auxVar = np.random.random()
if alternative == 'greater':
return self.ccdf(theta0, observed, gamma=auxVar) < alpha
else:
return self.cdf(theta0, observed, gamma=auxVar) < alpha
else:
if alternative == 'greater':
return self.ccdf(theta0, observed) < alpha
else:
return self.cdf(theta0, observed) < alpha
def interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
"""
Form UMAU confidence interval.
Parameters
----------
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
lower, upper : float
Limits of confidence interval.
"""
if randomize:
if auxVar is None:
auxVar = np.random.random()
upper = self._inter2Upper(observed, auxVar, alpha, tol)
lower = self._inter2Lower(observed, auxVar, alpha, tol)
else:
upper = self._inter2Upper(observed, 1., alpha, tol)
lower = self._inter2Lower(observed, 0., alpha, tol)
return lower, upper
def equal_tailed_interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
"""
Form interval by inverting
equal-tailed test with $\alpha/2$ in each tail.
Parameters
----------
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
lower, upper : float
Limits of confidence interval.
"""
mu = self.E(self.theta, lambda x: x)
sigma = np.sqrt(self.Var(self.theta, lambda x: x))
lb = mu - 20 * sigma
ub = mu + 20 * sigma
F = lambda th : self.cdf(th, observed)
L = find_root(F, 1.0 - 0.5 * alpha, lb, ub)
U = find_root(F, 0.5 * alpha, lb, ub)
return L, U
def equal_tailed_test(self, theta0, observed, alpha=0.05):
r"""
Perform UMPU two-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
pval = self.cdf(theta0, observed, gamma=0.5)
return min(pval, 1-pval) < alpha
def one_sided_acceptance(self, theta,
alpha=0.05,
alternative='greater',
tol=1e-6):
r"""
Compute the acceptance region cutoffs of UMPU one-sided test.
TODO: Include randomization?
Parameters
----------
theta : float
Natural parameter.
alpha : float (optional)
Size of two-sided test.
alternative : str
One of ['greater', 'less'].
tol : float
Tolerance for root-finding.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
if alternative == 'greater':
F = self.ccdf(theta, gamma=0.5)
cutoff = np.min(self.sufficient_stat[F <= alpha])
acceptance = (-np.inf, cutoff)
elif alternative == 'less':
F = self.ccdf(theta, gamma=0.5)
cutoff = np.max(self.sufficient_stat[F <= alpha])
acceptance = (cutoff, np.inf)
else:
raise ValueError("alternative should be one of ['greater', 'less']")
return acceptance
def equal_tailed_acceptance(self, theta0, alpha=0.05):
r"""
Compute the acceptance region cutoffs of
equal-tailed test (without randomization).
Therefore, size may not be exactly $\alpha$.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
alpha : float (optional)
Size of two-sided test.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
F = self.cdf(theta0, gamma=0.5)
Lcutoff = np.max(self.sufficient_stat[F <= 0.5 * alpha])
Rcutoff = np.min(self.sufficient_stat[F >= 1 - 0.5*alpha])
return Lcutoff, Rcutoff
def MLE(self, observed, initial=0,
max_iter=20, tol=1.e-4):
r"""
Compute the maximum likelihood estimator
based on observed sufficient statistic `observed`.
Parameters
----------
observed : float
Observed value of sufficient statistic
initial : float
Starting point for Newton-Raphson
max_iter : int (optional)
Maximum number of Newton-Raphson iterations
tol : float (optional)
Tolerance parameter for stopping, based
on relative change in parameter estimate.
Iteration stops when the change is smaller
than `tol * max(1, np.fabs(cur_estimate))`.
Returns
-------
theta_hat : float
Maximum likelihood estimator.
std_err : float
Estimated variance of `theta_hat` based
on inverse of variance of sufficient
statistic at `theta_hat`, i.e. the
observed Fisher information.
"""
cur_est = initial
def first_two_moments(x):
return np.array([x, x**2]).T
for i in range(max_iter):
cur_moments = self.E(cur_est, first_two_moments) # gradient and
# Hessian of CGF
# (almost)
grad, hessian = (cur_moments[0] - observed,
cur_moments[1] - cur_moments[0]**2)
next_est = cur_est - grad / hessian # newton step
if np.fabs(next_est - cur_est) < tol * max(1, np.fabs(cur_est)):
break
cur_est = next_est
if i == max_iter - 1:
warnings.warn('Newton-Raphson failed to converge after %d iterations' % max_iter)
cur_moments = self.E(cur_est, first_two_moments) # gradient and
# Hessian of CGF
# (almost)
grad, hessian = (cur_moments[0] - observed,
cur_moments[1] - cur_moments[0]**2)
return cur_est, 1. / hessian, grad
# Private methods
def _rightCutFromLeft(self, theta, leftCut, alpha=0.05):
"""
Given C1, gamma1, choose C2, gamma2 to make E(phi(X)) = alpha
"""
C1, gamma1 = leftCut
alpha1 = self.cdf(theta, C1, gamma1)
if alpha1 >= alpha:
return (np.inf, 1)
else:
alpha2 = alpha - alpha1
P = self.ccdf(theta, gamma=0)
idx = np.nonzero(P < alpha2)[0].min()
cut = self.sufficient_stat[idx]
pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
ccdf_term = P[idx]
gamma2 = (alpha2 - ccdf_term) / pdf_term
return (cut, gamma2)
def _leftCutFromRight(self, theta, rightCut, alpha=0.05):
"""
Given C2, gamma2, choose C1, gamma1 to make E(phi(X)) = alpha
"""
C2, gamma2 = rightCut
alpha2 = self.ccdf(theta, C2, gamma2)
if alpha2 >= alpha:
return (-np.inf, 1)
else:
alpha1 = alpha - alpha2
P = self.cdf(theta, gamma=0)
idx = np.nonzero(P < alpha1)[0].max()
cut = self.sufficient_stat[idx]
cdf_term = P[idx]
pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
gamma1 = (alpha1 - cdf_term) / pdf_term
return (cut, gamma1)
def _critCovFromLeft(self, theta, leftCut, alpha=0.05):
"""
Covariance of X with phi(X) where phi(X) is the level-alpha test with left cutoff C1, gamma1
"""
C1, gamma1 = leftCut
C2, gamma2 = self._rightCutFromLeft(theta, leftCut, alpha)
if C2 == np.inf:
return -np.inf
else:
return self.Cov(theta, lambda x: x, lambda x: crit_func(x, (C1, gamma1), (C2, gamma2)))
def _critCovFromRight(self, theta, rightCut, alpha=0.05):
"""
Covariance of X with phi(X) where phi(X) is the level-alpha test with right cutoff C2, gamma2
"""
C2, gamma2 = rightCut
C1, gamma1 = self._leftCutFromRight(theta, rightCut, alpha)
if C1 == -np.inf:
return np.inf
else:
return self.Cov(theta, lambda x: x, lambda x: crit_func(x, (C1, gamma1), (C2, gamma2)))
def _test2RejectsLeft(self, theta, observed, alpha=0.05, auxVar=1.):
"""
        Returns True if `observed` is in the left lobe of the UMPU two-sided rejection region.
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to "larger" x, so LESS likely to reject
auxVar = 1 is conservative
"""
return self._critCovFromLeft(theta, (observed, auxVar), alpha) > 0
def _test2RejectsRight(self, theta, observed, alpha=0.05, auxVar=0.):
"""
        Returns True if `observed` is in the right lobe of the UMPU two-sided rejection region.
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger," so MORE likely to reject.
auxVar = 0 is conservative.
"""
return self._critCovFromRight(theta, (observed, 1.-auxVar), alpha) < 0
def _inter2Upper(self, observed, auxVar, alpha=0.05, tol=1e-6):
"""
upper bound of two-sided umpu interval
"""
if observed < self.sufficient_stat[0] or (observed == self.sufficient_stat[0] and auxVar <= alpha):
return -np.inf # observed, auxVar too small, every test rejects left
if observed > self.sufficient_stat[self.n - 2] or (observed == self.sufficient_stat[self.n - 2] and auxVar == 1.):
return np.inf # observed, auxVar too large, no test rejects left
return find_root(lambda theta: -1*self._test2RejectsLeft(theta, observed, alpha, auxVar), -0.5, -1., 1., tol)
def _inter2Lower(self, observed, auxVar, alpha=0.05, tol=1e-6):
"""
lower bound of two-sided umpu interval
"""
if observed > self.sufficient_stat[self.n-1] or (observed == self.sufficient_stat[self.n-1] and auxVar >= 1.-alpha):
return np.inf # observed, auxVar too large, every test rejects right
if observed < self.sufficient_stat[1] or (observed == self.sufficient_stat[1] and auxVar == 0.):
return -np.inf # observed, auxVar too small, no test rejects right
return find_root(lambda theta: 1.*self._test2RejectsRight(theta, observed, alpha, auxVar), 0.5, -1., 1., tol)
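# --- Editor-added usage sketch (not part of the original module) ---
# It assumes the enclosing class is ``discrete_family(sufficient_stat, weights)``,
# as this module's path suggests, and that numpy/scipy are available; adjust the
# constructor call if the actual signature differs.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import binom
    k = np.arange(11)
    fam = discrete_family(k, binom.pmf(k, 10, 0.5))  # Binomial(10, 1/2) reference weights
    # Newton-Raphson MLE of the natural parameter given an observed statistic of 7
    theta_hat, var_hat, grad = fam.MLE(observed=7., initial=0.)
    print(theta_hat, var_hat, grad)
    # equal-tailed acceptance cutoffs under the null theta0 = 0
    print(fam.equal_tailed_acceptance(0., alpha=0.05))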
| {
"content_hash": "4cac0e486555a476bfb0f4c513e7a576",
"timestamp": "",
"source": "github",
"line_count": 806,
"max_line_length": 124,
"avg_line_length": 30.393300248138956,
"alnum_prop": 0.5120627015552924,
"repo_name": "selective-inference/selective-inference",
"id": "5c6e6fc23df3d182a38d0e4a4de3e53900ea0cf3",
"size": "24497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "selectinf/distributions/discrete_family.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "572490"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
} |
from model.group import Group
import pytest
def test_add_group(app, db, json_groups):
group = json_groups
with pytest.allure.step('Given a group list'):
old_groups = db.get_group_list()
with pytest.allure.step('When I add a group %s to the list' % group ):
app.group.create(group)
    with pytest.allure.step('Then the new group list is equal to the old list with the added group'):
new_groups = db.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
| {
"content_hash": "62e82b66757c77b06db2067aa61652ce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 100,
"avg_line_length": 36.625,
"alnum_prop": 0.6672354948805461,
"repo_name": "PaulRumyantsev/python_QA",
"id": "026f0b4523476ea08ebfed656644f15dcf717e9c",
"size": "610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_add_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "394"
},
{
"name": "Gherkin",
"bytes": "1496"
},
{
"name": "HTML",
"bytes": "795"
},
{
"name": "JavaScript",
"bytes": "7037"
},
{
"name": "Python",
"bytes": "45503"
},
{
"name": "RobotFramework",
"bytes": "1943"
}
],
"symlink_target": ""
} |
from cattle import ApiError
from common_fixtures import * # NOQA
def _create_virtual_machine(client, context, **kw):
args = {
'accountId': context.project.id,
'imageUuid': context.image_uuid,
}
args.update(kw)
return client.create_virtual_machine(**args)
@pytest.fixture(scope='module')
def network(context):
return context.nsp.network()
@pytest.fixture(scope='module')
def subnet(network):
return network.subnets()[0]
def test_virtual_machine_default_fields(super_client, client, context):
disk_name = 'disk' + random_str()
disks = [
{
'size': '2g',
'opts': {
'foo': 'bar'
}
},
{
'name': disk_name,
'driver': 'foo',
}
]
vm = _create_virtual_machine(client, context,
volumeDriver='foo-bar',
userdata='hi', vcpu=2, memoryMb=42,
disks=disks)
vm = client.wait_success(vm)
assert vm.state == 'running'
assert vm.vcpu == 2
assert vm.memoryMb == 42
c = super_client.reload(vm)
assert c.labels['io.rancher.vm'] == 'true'
assert c.labels['io.rancher.vm.memory'] == '42'
assert c.labels['io.rancher.vm.vcpu'] == '2'
assert c.labels['io.rancher.vm.userdata'] == 'hi'
assert c.dataVolumes == ['/var/lib/rancher/vm:/vm',
'{}-00:/volumes/disk00'.format(c.uuid),
'{}:/volumes/disk01'.format(disk_name)]
assert c.devices == ['/dev/kvm:/dev/kvm', '/dev/net/tun:/dev/net/tun']
assert c.capAdd == ['NET_ADMIN']
assert c.capabilities == ['console']
volume1 = find_one(client.list_volume, name=c.uuid + '-00')
assert volume1.driver == 'foo-bar'
assert volume1.driverOpts == {'vm': 'true', 'size': '2g', 'foo': 'bar'}
volume2 = find_one(client.list_volume, name=disk_name)
assert volume2.name == disk_name
assert volume2.driver == 'foo'
assert volume2.driverOpts == {'vm': 'true', 'size': '10g'}
assert c.dataVolumeMounts == {
'/volumes/disk00': volume1.id,
'/volumes/disk01': volume2.id,
}
def test_virtual_machine_stats(client, context):
vm = _create_virtual_machine(client, context, vcpu=2, memoryMb=42)
vm = client.wait_success(vm)
assert vm.state == 'running'
assert 'stats' in vm
assert 'containerStats' in vm
def test_virtual_machine_create_cpu_memory(client, context):
vm = _create_virtual_machine(client, context,
vcpu=2, memoryMb=42)
vm = client.wait_success(vm)
assert vm.state == 'running'
assert vm.vcpu == 2
assert vm.memoryMb == 42
def test_virtual_machine_create(super_client, context):
vm = _create_virtual_machine(super_client, context)
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert vm.vcpu == 1
assert vm.memoryMb == 512
def test_virtual_machine_create_null_network_id(super_client, context):
image_uuid = context.image_uuid
try:
super_client.create_virtual_machine(imageUuid=image_uuid,
networkIds=[None])
assert False
except ApiError as e:
assert e.error.code == 'NotNullable'
def test_virtual_machine_n_ids_s_ids(super_client, context,
network, subnet):
image_uuid = context.image_uuid
try:
super_client.create_virtual_machine(imageUuid=image_uuid,
networkIds=[network.id],
subnetIds=[subnet.id])
except ApiError as e:
assert e.error.code == 'NetworkIdsSubnetIdsMutuallyExclusive'
def test_virtual_machine_network(super_client, context, network, subnet):
subnet_plain_id = get_plain_id(super_client, subnet)
vm = _create_virtual_machine(super_client, context,
networkIds=[network.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert 'networkIds' not in vm
nics = vm.nics()
assert len(nics) == 1
nic = nics[0]
assert nic.network().id == network.id
assert nic.state == 'active'
assert nic.macAddress is not None
assert nic.macAddress.startswith(network.macPrefix)
nic_admin = super_client.reload(nic)
vm_admin = super_client.reload(vm)
assert nic_admin.account().id == vm_admin.accountId
ips = nic.ipAddresses()
assert len(ips) == 1
assert super_client.reload(nic).ipAddressNicMaps()[0].state == 'active'
ip = ips[0]
ip_admin = super_client.reload(ip)
assert ip_admin.account().id == vm_admin.accountId
assert ip_admin.subnet().id == nic_admin.subnet().id
assert ip_admin.role == 'primary'
assert ip.address is not None
assert ip.address.startswith('10.42')
assert vm.primaryIpAddress is not None
assert vm.primaryIpAddress == ip.address
addresses = super_client.list_resource_pool(poolType='subnet',
poolId=subnet_plain_id)
assert ip.address in [x.item for x in addresses]
def test_virtual_machine_subnet(super_client, context, subnet):
network = subnet.network()
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert 'subnetIds' not in vm
nics = vm.nics()
assert len(nics) == 1
nic = nics[0]
assert nic.subnetId == subnet.id
assert nic.network().id == network.id
assert nic.state == 'active'
ips = nic.ipAddresses()
assert len(ips) == 1
ip = ips[0]
assert ip.address is not None
assert ip.address.startswith('10.42')
assert vm.primaryIpAddress is not None
assert vm.primaryIpAddress == ip.address
def test_virtual_machine_no_ip(super_client, context):
account_id = context.project.id
network = super_client.create_network(accountId=account_id)
subnet = super_client.create_subnet(networkAddress='192.168.0.0',
accountId=account_id,
cidrSize='16',
networkId=network.id,
startAddress='192.168.0.3',
endAddress='192.168.0.3')
subnet = super_client.wait_success(subnet)
assert subnet.state == 'active'
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert vm.primaryIpAddress == '192.168.0.3'
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_transitioning(vm)
assert vm.state == 'removed'
assert vm.transitioning == 'error'
assert vm.transitioningMessage == \
'Failed to allocate IP from subnet : IP allocation error'
def test_virtual_machine_stop_subnet(super_client, context, subnet):
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
assert vm.nics()[0].ipAddresses()[0].address.startswith('10.42')
vm = super_client.wait_success(vm.stop())
assert vm.state == 'stopped'
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
nic = vm.nics()[0]
ip_address = nic.ipAddresses()[0]
assert ip_address.state == 'active'
assert ip_address.address.startswith('10.42')
assert nic.state == 'inactive'
def test_virtual_machine_remove_subnet(super_client, context, subnet):
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
assert vm.nics()[0].ipAddresses()[0].address.startswith('10.42')
vm = super_client.wait_success(vm.stop(remove=True))
assert vm.state == 'removed'
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
nic = vm.nics()[0]
ip_address = nic.ipAddresses()[0]
assert ip_address.state == 'active'
assert ip_address.address.startswith('10.42')
assert nic.state == 'removed'
def test_virtual_machine_purge_subnet(super_client, context, subnet):
subnet_plain_id = get_plain_id(super_client, subnet)
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
addresses = super_client.list_resource_pool(poolType='subnet',
poolId=subnet_plain_id)
assert vm.primaryIpAddress in [x.item for x in addresses]
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
assert vm.nics()[0].ipAddresses()[0].address.startswith('10.42')
vm = super_client.wait_success(vm.stop(remove=True))
assert vm.state == 'removed'
assert len(vm.nics()) == 1
assert len(vm.nics()[0].ipAddresses()) == 1
nic = vm.nics()[0]
ip_address = nic.ipAddresses()[0]
assert ip_address.state == 'active'
assert ip_address.address.startswith('10.42')
assert nic.state == 'removed'
vm = super_client.wait_success(vm.purge())
assert vm.state == 'purged'
nics = vm.nics()
assert len(nics) == 1
nic = nics[0]
assert nic.state == 'removed'
assert nic.macAddress is not None
nic = super_client.wait_success(nic.purge())
assert nic.state == 'purged'
assert nic.macAddress is None
assert len(nic.ipAddressNicMaps()) == 1
assert nic.ipAddressNicMaps()[0].state == 'removed'
assert len(nic.ipAddresses()) == 0
ip_address = super_client.reload(ip_address)
assert ip_address.state == 'removed'
assert ip_address.address is not None
addresses = super_client.list_resource_pool(poolType='subnet',
poolId=subnet_plain_id)
assert vm.primaryIpAddress not in [x.item for x in addresses]
def test_virtual_machine_restore_subnet(super_client, context, subnet):
subnet_plain_id = get_plain_id(super_client, subnet)
vm = _create_virtual_machine(super_client, context,
subnetIds=[subnet.id])
vm = super_client.wait_success(vm)
assert vm.state == 'running'
addresses = super_client.list_resource_pool(poolType='subnet',
poolId=subnet_plain_id)
assert vm.primaryIpAddress in [x.item for x in addresses]
vm = super_client.wait_success(vm.stop())
assert vm.state == 'stopped'
vm = super_client.wait_success(super_client.delete(vm))
assert vm.state == 'removed'
nic = vm.nics()[0]
ip_address = nic.ipAddresses()[0]
address = ip_address.address
assert ip_address.address.startswith('10.42')
vm = vm.restore()
assert vm.state == 'restoring'
vm = super_client.wait_success(vm)
assert vm.state == 'stopped'
assert len(vm.nics()) == 1
nic = vm.nics()[0]
assert nic.state == 'inactive'
assert len(nic.ipAddresses()) == 1
ip_address = nic.ipAddresses()[0]
assert ip_address.state == 'active'
vm = super_client.wait_success(vm.start())
assert vm.state == 'running'
assert vm.nics()[0].ipAddresses()[0].address == address
def test_virtual_machine_console(super_client, context):
vm = _create_virtual_machine(super_client, context)
vm = super_client.wait_success(vm)
assert 'console' in vm
assert 'console' in vm and callable(vm.console)
console = vm.console()
assert console is not None
assert console.url.endswith('/v1/console/')
def test_virtual_machine_console_visibility(super_client, context):
vm = _create_virtual_machine(super_client, context)
vm = super_client.wait_success(vm)
assert 'console' in vm
assert 'console' in vm and callable(vm.console)
vm = super_client.wait_success(vm.stop())
assert vm.state == 'stopped'
assert 'console' not in vm
| {
"content_hash": "fe56ac0c7c957f1fe4a617ab2ed0cc7c",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 75,
"avg_line_length": 31.149144254278728,
"alnum_prop": 0.6010989010989011,
"repo_name": "stresler/cattle",
"id": "662771752b0a6c8c14b3cc1b602c67baba3510eb",
"size": "12740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_virtual_machine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "17738"
},
{
"name": "Java",
"bytes": "5536744"
},
{
"name": "Python",
"bytes": "650065"
},
{
"name": "Shell",
"bytes": "44520"
}
],
"symlink_target": ""
} |
"""
CLI tool for retrieving artifacts from CircleCI
Usage:
circleci-getter [--debug] --user=USER --project=PROJECT [--branch=BRANCH] [--filter=FILTER]
[--out=OUT] [--token=TOKEN]
Options:
--debug Print debug info
--help Print this message
--user=USER GitHub organisation name or user name
--project=PROJECT GitHub project name
--branch=BRANCH Branch from where to get artifacts. [default: master]
--filter=FILTER Get only files that match provided filter (use Python re format) [default: .*]
--out=OUT Directory to put downloaded artifacts to [default: out]
--token=TOKEN Env var name to read CircleCI API token from [default: TOKEN]
"""
from docopt import docopt
import requests
import copy
import errno
import json
import logging
import os
import re
BASE_URL = 'https://circleci.com/api/v1.1/project/github'
def get_options(args):
result = {}
for key, value in args.items():
result[key.lstrip('--')] = value
logging.debug('Extracted options: {}'.format(result))
return result
def get_token(token_name):
if token_name in os.environ:
return os.environ[token_name]
    raise EnvironmentError('Can not read env variable {}'.format(token_name))
def send_request(url, params):
headers = {'Accept': 'application/json'}
result = requests.get(url, params = params, headers = headers)
if result.status_code == 200:
return result
else:
raise IOError('Received code {}: {}'.format(result.status_code, result.text))
def get_build_number(base_url, branch, token):
logging.info('Getting latest successful build on {}'.format(branch))
params = {'circle-token': token, 'limit': 1, 'filter': 'successful'}
url = '{}/tree/{}'.format(base_url, branch)
latest_build = send_request(url, params)
latest_build_json = json.loads(latest_build.text)
return latest_build_json[0]['build_num']
def get_artifacts_url_as_list(base_url, build_number, artifact_filter, token):
logging.info('Look up artifacts url for build number #{} ...'.format(build_number))
params = {'circle-token': token}
url = '{}/{}/artifacts'.format(base_url, build_number)
artifacts = send_request(url, params)
result = []
for artifact in json.loads(artifacts.text):
# If matches then return it
if re.match(artifact_filter, artifact['path']):
result.append(artifact['url'])
return result
def download_files(urls, out, token):
logging.info('Downloading files to {} ...'.format(out))
params = {'circle-token': token}
if not os.path.exists(out):
try:
os.makedirs(out)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
for url in urls:
rsp = send_request(url, params)
with open(os.path.join(out, os.path.basename(url)), "w") as f:
f.write(rsp.text)
logging.info('Wrote {}'.format(f.name))
def main():
arguments = docopt(__doc__)
if arguments['--debug']:
logging.basicConfig(level=logging.DEBUG)
else:
logging.getLogger('requests').setLevel(logging.WARNING)
logging.basicConfig(format='%(message)s', level=logging.INFO)
token = get_token(arguments['--token'])
options = get_options(arguments)
url = '{}/{}/{}'.format(BASE_URL, options['user'], options['project'])
build_number = get_build_number(url, options['branch'], token)
logging.info('Latest successful build on {} is #{}'.format(options['branch'], build_number))
artifacts_url_list = get_artifacts_url_as_list(url, build_number, options['filter'], token)
logging.info('Got the following URLs: {}'.format(artifacts_url_list))
download_files(artifacts_url_list, options['out'], token)
if __name__ == "__main__":
main()
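# Editor-added example invocation (illustrative only; the user, project and
# filter values below are placeholders, not taken from this repository):
#   TOKEN=<circleci api token> python circleci-getter.py --user=myorg \
#       --project=myrepo --branch=master --filter='.*\.tar\.gz' --out=artifacts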
"""
base_payload = {'circle-token': '214bed4d74229d31da1b4ab0f8490361b59a19bf'}
headers = {'Accept': 'application/json'}
base_url = 'https://circleci.com/api/v1.1/project/github/transisland/platform'
branch = 'staging'
# Get latest build for the staging branch
print 'Getting latest successful build on {}'.format(branch)
latest_build_payload = copy.copy(base_payload)
latest_build_payload['limit'] = 1
latest_build_payload['filter'] = 'successful'
latest_build_url = '{}/tree/{}'.format(base_url, branch)
print latest_build_payload
print latest_build_url
latest_build = requests.get(latest_build_url,
params = latest_build_payload,
headers = headers)
latest_build_json = json.loads(latest_build.text)
latest_build_number = latest_build_json[0]['build_num']
print 'Latest successful build on {} is #{}: {}'.format(branch, latest_build_number, latest_build_json[0]['build_url'])
print 'Look up artifacts url...'
build_artifacts_url = '{}/{}/artifacts'.format(base_url, latest_build_number)
build_artifacts = requests.get(build_artifacts_url,
params = base_payload,
headers = headers)
for artifact in json.loads(build_artifacts.text):
print artifact['url']
"""
'''
rsp = requests.get('https://circleci.com/api/v1.1/project/github/transisland/platform/latest/artifacts', params=payload)
print 'text ' + rsp.text
print json.loads(rsp.text)
if rsp.status_code == 200:
logging.info(rsp.json())
else:
logging.error(rsp.status_code)
'''
| {
"content_hash": "7163737006f098880142300f2da240e0",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 120,
"avg_line_length": 35.23376623376623,
"alnum_prop": 0.6566531514928123,
"repo_name": "transisland/circleci-artifact-getter",
"id": "57076783c5227aa8984e0e0e5cb41c9a2c260a71",
"size": "5449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "circleci-getter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5449"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
} |
"""A module for capturing time-related functions.
This makes mocking for time-related functionality easier.
"""
import calendar
import datetime
import re
import time
def CurrentTimeSec():
"""Returns a float of the current time in seconds."""
return time.time()
def Sleep(duration_sec):
"""Sleeps for the given duration."""
time.sleep(duration_sec)
def CurrentDatetimeUtc():
"""Returns the current date and time in the UTC timezone."""
return datetime.datetime.utcnow()
def IsExpired(timestamp_rfc3339_str):
  no_expiration = ''
  if timestamp_rfc3339_str == no_expiration:
    return False
  timestamp_unix = Strptime(timestamp_rfc3339_str)
if timestamp_unix < CurrentTimeSec():
return True
return False
# Parsing code for rfc3339 timestamps, taken from Google's rfc3339.py.
# TODO(user): Investigate opensourcing rfc3339.py
def Strptime(rfc3339_str):
"""Converts an RFC 3339 timestamp to Unix time in seconds since the epoch.
Args:
rfc3339_str: a timestamp in RFC 3339 format (yyyy-mm-ddThh:mm:ss.sss
followed by a time zone, given as Z, +hh:mm, or -hh:mm)
Returns:
a number of seconds since January 1, 1970, 00:00:00 UTC
Raises:
ValueError: if the timestamp is not in an acceptable format
"""
match = re.match(r'(\d\d\d\d)-(\d\d)-(\d\d)T'
r'(\d\d):(\d\d):(\d\d)(?:\.(\d+))?'
r'(?:(Z)|([-+])(\d\d):(\d\d))', rfc3339_str)
if not match:
raise ValueError('not a valid timestamp: %r' % rfc3339_str)
(year, month, day, hour, minute, second, frac_seconds,
zulu, zone_sign, zone_hours, zone_minutes) = match.groups()
time_tuple = map(int, [year, month, day, hour, minute, second])
# Parse the time zone offset.
if zulu == 'Z': # explicit
zone_offset = 0
else:
zone_offset = int(zone_hours) * 3600 + int(zone_minutes) * 60
if zone_sign == '-':
zone_offset = -zone_offset
integer_time = calendar.timegm(time_tuple) - zone_offset
if frac_seconds:
sig_dig = len(frac_seconds)
return ((integer_time * (10 ** sig_dig)
+ int(frac_seconds)) * (10 ** -sig_dig))
else:
return integer_time
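# Editor-added examples (illustrative): Strptime('2014-03-27T00:00:00Z') returns
# 1395878400, and Strptime('2014-03-27T02:00:00+02:00') returns the same value,
# since the +02:00 zone offset is subtracted back out.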
def CalculateExpiration(num_seconds):
"""Takes a number of seconds and returns the expiration time in RFC 3339."""
if num_seconds is None:
return None
utc_now = CurrentDatetimeUtc()
adjusted = utc_now + datetime.timedelta(0, int(num_seconds))
formatted_expiration = _FormatDateString(adjusted)
return formatted_expiration
def _FormatDateString(d):
return ('%04d-%02d-%02dT%02d:%02d:%02dZ' %
(d.year, d.month, d.day, d.hour, d.minute, d.second))
| {
"content_hash": "6a4739e5f5a0c58eaf1f59bcf15d767b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 28.268817204301076,
"alnum_prop": 0.6610878661087866,
"repo_name": "flgiordano/netcash",
"id": "773bc9824a34576cef31a4adf9887eb7cf609a27",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/time_utils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
__author__ = 'mario'
import SafeBehaviour
class BatteryFailsafeBehaviour(SafeBehaviour.SafeBehaviour):
"""
Defines a battery failsafe behaviour.
If the battery is below a certain threshold, the behaviour sends a message requesting to land.
"""
def __init__(self, battery, minimum_voltage, vehicle):
SafeBehaviour.SafeBehaviour.__init__(self, 20)
self.battery = battery
self.minimum_voltage = minimum_voltage
self.vehicle = vehicle
def run(self):
"""Executes the behaviour, creating a Command object, asking the UAV to land"""
#TODO this is wrong, the state should be local...or not
if self.vehicle.mode.name != "LAND":
            if self.battery.voltage <= self.minimum_voltage:  # at or below the failsafe threshold
return SafeBehaviour.land
return SafeBehaviour.SafeBehaviour.do_nothing
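# Editor-added usage sketch (illustrative; the vehicle/battery objects and the
# 10.5 V threshold are placeholders, and SafeBehaviour.land / do_nothing come
# from the sibling SafeBehaviour module):
#   failsafe = BatteryFailsafeBehaviour(vehicle.battery, 10.5, vehicle)
#   command = failsafe.run()  # returns SafeBehaviour.land when the battery is low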
| {
"content_hash": "293ba58bc0267fb41638b9f6bdd17a38",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 98,
"avg_line_length": 31.357142857142858,
"alnum_prop": 0.6617312072892938,
"repo_name": "mhct/droneuaw",
"id": "576c6a76a04d30675881074d9e338784c8aaa535",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BatteryFailsafeBehaviour.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenSCAD",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "21543"
}
],
"symlink_target": ""
} |
'''
Created on 2014-3-27
@author: Hali
'''
import sys
import os
import Image
from PlistParser import Frame
def printUsage():
print "Usage: ImageUtils.py [-s input=srcImgPath outSize=[(width,heigh)|(x,y,width,heigt)] outPath=outPath]"
print " [-c input=srcImgPath srcRect=(x,y,w,h) outPath=outPath]"
print " [-cs input=srcImgPath srcRect=(x,y,w,h) outPath=outPath outSize=(w,h)]"
print "Options:"
print " -s scale the image to input size"
print " input: srcImgPath the source image to scale"
print " outSize: size of image to scale [no space]"
print " outPath: path of Image to save"
print ""
print " -c crop the rect of image and save to outPath"
print " input: srcImgPath the source image to crop"
print " srcRect: rect of image to be crop [no space]"
print " outPath: path of croped Image to save"
print ""
print " -cs crop the rect of image and save to outPath"
print " input: srcImgPath the source image to crop"
print " srcRect: rect of image to be crop [no space]"
print " outPath: path of croped Image to save"
print " outSize: size of image crop to sace [no space]"
print ""
print "Scale Sample: ./ImageUtils.py -s input=./test.png outSize={20,20} outPath=./test-scale.png"
print "Crop Sample: ./ImageUtils.py -c input=./test.png srcRect={0,0,20,20} outPath=./test-crop.png"
print "Crop&Scale Sample: ./ImageUtils.py -cs input=./test.png outSize={10,10,20,20} outPath=./test-scale.png outSize=(100,100)"
print ""
def scaleImg(img, box):
if len(box) != 4:
print "box arg len is Not enough!"
sys.exit();
if (box[2] == 0 or box[3] == 0):
print "Error! outImg size(%d, %d) invalid!" % (box[2], box[3])
sys.exit()
img = img.resize((box[2], box[3]))
newImg = Image.new("RGB", (box[2], box[3]), (255, 255, 255))
newImg.putalpha(0)
newImg.paste(img)
return newImg
def cropImg(img, frame):
x, y = int(frame.x), int(frame.y)
w, h = int(frame.w), int(frame.h)
ox, oy = int(frame.ox), int(frame.oy)
ow, oh = int(frame.ow), int(frame.oh)
px = int((ow - w)/2 + ox)
py = int((oh - h)/2 - oy)
rotation = 0
if frame.rotated == True:
w, h = h, w
rotation = 90
box = (x, y, x + w, y + h)
if frame.ow == 0:
frame.ow = 1
if frame.oh == 0:
frame.oh = 1
newImg = img.resize((frame.ow, frame.oh))
newImg.putalpha(255)
if w > 0 and h > 0:
cropImg = img.crop(box)
cropImg = cropImg.rotate(rotation)
for i in range(cropImg.size[0]):
for j in range(cropImg.size[1]):
newImg.putpixel((i + px, j + py), cropImg.getpixel((i, j)))
return newImg
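# Editor-added usage sketch (illustrative; ./test.png is a placeholder path):
#   img = Image.open("./test.png")
#   thumb = scaleImg(img, (0, 0, 64, 64))            # resize to 64x64 with alpha background
#   frame = Frame(); frame.init(0, 0, 32, 32, 0, 0, 32, 32)
#   patch = cropImg(img, frame)                      # crop the top-left 32x32 region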
def checkArgs(args):
if (len(args) != 4 and len(args) != 5):
printUsage()
sys.exit()
argMode = args[0]
if argMode == "-s":
inputPath, outSize, outPath = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
outSize = outSize.split("=")[1]
outSize = outSize.replace("(", "")
outSize = outSize.replace(")", "")
sizeArg = outSize.split(",")
if len(sizeArg) == 2:
outSize = (0, 0, int(sizeArg[0]), int(sizeArg[1]))
elif len(sizeArg) == 4:
outSize = (int(sizeArg[0]), int(sizeArg[1]), int(sizeArg[2]), int(sizeArg[3]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
newImg = scaleImg(inputImg, outSize)
dirName = os.path.dirname(outPath)
if not os.path.exists(dirName):
os.makedirs(dirName)
newImg.save(outPath)
elif argMode == "-c":
inputPath, srcRect, outPath = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
srcRect = srcRect.split("=")[1]
srcRect = srcRect.replace("(", "")
srcRect = srcRect.replace(")", "")
rectArg = srcRect.split(",")
if not len(rectArg) == 4:
print "in crop mode, src rect arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
srcRect = (int(rectArg[0]), int(rectArg[1]), int(rectArg[2]), int(rectArg[3]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
frame = Frame()
x, y, w, h = srcRect
frame.init( x, y, w, h, 0, 0, w, h)
newImg = cropImg(inputImg, frame)
newImg.save(outPath)
elif argMode == "-cs":
inputPath, srcRect, outPath, outSize = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
srcRect = srcRect.split("=")[1]
srcRect = srcRect.replace("(", "")
srcRect = srcRect.replace(")", "")
rectArg = srcRect.split(",")
if not len(rectArg) == 4:
print "in crop mode, src rect arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
srcRect = (int(rectArg[0]), int(rectArg[1]), int(rectArg[2]), int(rectArg[3]))
outSize = outSize.split("=")[1]
outSize = outSize.replace("(", "")
outSize = outSize.replace(")", "")
sizeArg = outSize.split(",")
if not len(sizeArg) == 2:
print "in crop mode, out size arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
outSize = (int(sizeArg[0]), int(sizeArg[1]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
frame = Frame()
x, y, w, h = srcRect
ow, oh = outSize
frame.init( x, y, w, h, 0, 0, w, h)
newImg = cropImg(inputImg, frame)
newImg = scaleImg(newImg, (0, 0, ow, oh))
newImg.save(outPath)
if __name__ == '__main__':
curDir = os.getcwd()
checkArgs(sys.argv[1:]) | {
"content_hash": "d8236488ed0ee9d79ca6165cbb98c3ac",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 132,
"avg_line_length": 33.705263157894734,
"alnum_prop": 0.5271705184259837,
"repo_name": "ywl19891989/PlistParseUtils",
"id": "2266751aef2f132dfd969a77b3f0b1c0131b7fad",
"size": "6417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/module/ImageUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35022"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
import json
import os
import re
import sys
from pathlib import Path
from subprocess import Popen, PIPE
from urllib.parse import urlsplit, urlunsplit
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
from tests.spiders import SimpleSpider, SingleRequestSpider
class MitmProxy:
auth_user = 'scrapy'
auth_pass = 'scrapy'
def start(self):
from scrapy.utils.test import get_testenv
script = """
import sys
from mitmproxy.tools.main import mitmdump
sys.argv[0] = "mitmdump"
sys.exit(mitmdump())
"""
cert_path = Path(__file__).parent.resolve() / 'keys' / 'mitmproxy-ca.pem'
self.proc = Popen([sys.executable,
'-c', script,
'--listen-host', '127.0.0.1',
'--listen-port', '0',
'--proxyauth', f'{self.auth_user}:{self.auth_pass}',
'--certs', str(cert_path),
'--ssl-insecure',
],
stdout=PIPE, env=get_testenv())
line = self.proc.stdout.readline().decode('utf-8')
host_port = re.search(r'listening at http://([^:]+:\d+)', line).group(1)
address = f'http://{self.auth_user}:{self.auth_pass}@{host_port}'
return address
def stop(self):
self.proc.kill()
self.proc.communicate()
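# Editor-added usage sketch (illustrative; mirrors how setUp()/tearDown() below
# drive the helper):
#   proxy = MitmProxy()
#   proxy_url = proxy.start()   # e.g. 'http://scrapy:scrapy@127.0.0.1:<port>'
#   os.environ['https_proxy'] = proxy_url
#   ...
#   proxy.stop()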
def _wrong_credentials(proxy_url):
bad_auth_proxy = list(urlsplit(proxy_url))
bad_auth_proxy[1] = bad_auth_proxy[1].replace('scrapy:scrapy@', 'wrong:wronger@')
return urlunsplit(bad_auth_proxy)
class ProxyConnectTestCase(TestCase):
def setUp(self):
try:
import mitmproxy # noqa: F401
except ImportError:
self.skipTest('mitmproxy is not installed')
self.mockserver = MockServer()
self.mockserver.__enter__()
self._oldenv = os.environ.copy()
self._proxy = MitmProxy()
proxy_url = self._proxy.start()
os.environ['https_proxy'] = proxy_url
os.environ['http_proxy'] = proxy_url
def tearDown(self):
self.mockserver.__exit__(None, None, None)
self._proxy.stop()
os.environ = self._oldenv
@defer.inlineCallbacks
def test_https_connect_tunnel(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(self.mockserver.url("/status?n=200", is_secure=True))
self._assert_got_response_code(200, log)
@defer.inlineCallbacks
def test_https_tunnel_auth_error(self):
os.environ['https_proxy'] = _wrong_credentials(os.environ['https_proxy'])
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(self.mockserver.url("/status?n=200", is_secure=True))
# The proxy returns a 407 error code but it does not reach the client;
# he just sees a TunnelError.
self._assert_got_tunnel_error(log)
@defer.inlineCallbacks
def test_https_tunnel_without_leak_proxy_authorization_header(self):
request = Request(self.mockserver.url("/echo", is_secure=True))
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(seed=request)
self._assert_got_response_code(200, log)
echo = json.loads(crawler.spider.meta['responses'][0].text)
self.assertTrue('Proxy-Authorization' not in echo['headers'])
def _assert_got_response_code(self, code, log):
print(log)
self.assertEqual(str(log).count(f'Crawled ({code})'), 1)
def _assert_got_tunnel_error(self, log):
print(log)
self.assertIn('TunnelError', str(log))
| {
"content_hash": "7c4980b517c097dc179846937e700027",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 85,
"avg_line_length": 34.794642857142854,
"alnum_prop": 0.6138054914036438,
"repo_name": "scrapy/scrapy",
"id": "ea7701b5d1769e834cc8fa40013844e48caced03",
"size": "3897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_proxy_connect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "2021119"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from .utils import Utils
import json
import socket
class SocketException(Exception):
pass
class SocketUtils:
end_delim = b"\r\n\r\n"
@staticmethod
def send_json(address, port, msg, recv=False):
encoded_msg = Utils.safe_enc(json.dumps(Utils.safe_dec(msg)))
return SocketUtils.send(address, port, encoded_msg, recv)
@staticmethod
def send(address, port, msg, recv=False):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((address, port))
s.sendall(msg + SocketUtils.end_delim)
if recv:
data = b""
while True:
data += s.recv(1024)
if not data or data.endswith(SocketUtils.end_delim):
break
s.close()
return data
else:
s.close()
except: # TODO: distinguish between exceptions
raise SocketException
@staticmethod
def ip_for_host(host):
try:
return socket.gethostbyaddr(host)[2][0]
except:
return None
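# Editor-added usage sketch (illustrative; the address, port and payload are
# placeholders):
#   reply = SocketUtils.send_json("127.0.0.1", 9000, {"cmd": "ping"}, recv=True)
#   ip = SocketUtils.ip_for_host("localhost")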
| {
"content_hash": "34e182152cf79dfd793d0f441451b10e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.5393162393162393,
"repo_name": "billychasen/billots",
"id": "859a64f15e2a7dbe0b27d1edf21319927fff45d2",
"size": "1280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "billots/src/utils/socket_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45288"
}
],
"symlink_target": ""
} |
"""
premium question
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.sums = []
def checkEqualTree(self, root: TreeNode) -> bool:
"""
To save 2nd pass, store sums
space: O(N)
"""
self.dfs(root)
total = self.sums.pop()
return total % 2 == 0 and total // 2 in self.sums
def dfs(self, node):
if not node:
return 0
l = self.dfs(node.left)
r = self.dfs(node.right)
s = l + r + node.val
self.sums.append(s)
return s
class Solution:
def __init__(self):
"""
Save space, two passes
"""
self.exists = False
self.root = None # need to handle 0
self.total_sum = None
def checkEqualTree(self, root: TreeNode) -> bool:
"""
two passes
1st pass, get total sum
2nd pass, check whether has sum/2
space: O(log N)
To save 2nd pass, store sums
space: O(N)
"""
self.root = root
self.total_sum = self.dfs(root)
self.dfs(root)
return self.exists
def dfs(self, node):
if not node:
return 0
l = self.dfs(node.left)
r = self.dfs(node.right)
s = l + r + node.val
if node != self.root and self.total_sum != None and self.total_sum == s * 2:
self.exists = True
return s
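# Editor-added sanity sketch (not part of the original solution file): the tree
#        5
#      /   \
#    10     10
#          /  \
#         2    3
# has total sum 30 and a right subtree summing to 15, so it can be split evenly.
if __name__ == '__main__':
    root = TreeNode(5)
    root.left = TreeNode(10)
    root.right = TreeNode(10)
    root.right.left = TreeNode(2)
    root.right.right = TreeNode(3)
    assert Solution().checkEqualTree(root) is True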
| {
"content_hash": "df5c99584bf18ac98a70a44d6561d0cf",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 84,
"avg_line_length": 22.056338028169016,
"alnum_prop": 0.5019157088122606,
"repo_name": "algorhythms/LeetCode",
"id": "b93549dd17858a35a3f45794079f28ca118bec7f",
"size": "1585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "663 Equal Tree Partition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
} |
import requests
import sys, argparse
import os
import hashlib
import json
YAHOO_API_URL="https://query.yahooapis.com/v1/public/yql"
YAHOO_API_PARAMS={"format":"json","env":"store%3A%2F%2Fdatatables.org%2Falltableswithkeys"}
def main(args):
for yqlquery in args.infile:
filename=hashlib.md5(yqlquery.encode('utf-8')).hexdigest()
yqlquery = yqlquery.rstrip()
yqlquery=yqlquery.replace(" ","%20")
yqlquery=yqlquery.replace('"',"%22")
query = {"q":yqlquery}
params = {**query,**YAHOO_API_PARAMS} # requires python 3.5
payload_str = "&".join("%s=%s" % (k,v) for k,v in params.items()) # default url encoding will break it
r = requests.get(YAHOO_API_URL,params=payload_str)
result = r.json()
q = result["query"]["results"]["quote"]
json.dump(q, open("%s/%s.json" % (args.outdir,filename),'w'),indent=2)
args.infile.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fetch data from a list YQL Queries and save to files')
parser.add_argument('--infile', type=argparse.FileType('r'), help='input file of YQL Queries',default="output/1/yql-queries.txt")
parser.add_argument('--outdir', help='Output directory for YQL query responses',default="output/2/")
args = parser.parse_args()
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
main(args)
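# Editor-added note (illustrative): each line of the input file is expected to be
# a YQL query such as
#   select * from yahoo.finance.quotes where symbol in ("AAPL","GOOG")
# Yahoo has since retired the public YQL finance API, so this script is mainly of
# historical interest.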
| {
"content_hash": "37cb1a2414a92c2a068a3fb177cff4ae",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 131,
"avg_line_length": 46.172413793103445,
"alnum_prop": 0.6855862584017924,
"repo_name": "lloydroc/stockcrawl",
"id": "1775d06a2b6ba448525fc73e60ec65b56886fe97",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2-query_yahoo_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1098"
},
{
"name": "Python",
"bytes": "12699"
}
],
"symlink_target": ""
} |
import os
from datadog_checks.dev import get_docker_hostname, get_here
HOST = get_docker_hostname()
PORT = os.getenv('POWERDNS_HOST_PORT_0', 8082)
HERE = get_here()
POWERDNS_RECURSOR_VERSION = os.environ['POWERDNS_RECURSOR_VERSION']
CONFIG = {"host": HOST, "port": PORT, "api_key": "pdns_api_key"}
CONFIG_V4 = {"host": HOST, "port": PORT, "version": 4, "api_key": "pdns_api_key"}
BAD_CONFIG = {"host": HOST, "port": PORT, "api_key": "nope"}
BAD_API_KEY_CONFIG = {"host": HOST, "port": '1111', "api_key": "pdns_api_key"}
def _config_sc_tags(config):
host_tag = "recursor_host:{0}".format(config['host'])
port_tag = "recursor_port:{0}".format(config['port'])
return [host_tag, port_tag]
def _get_pdns_version():
return int(POWERDNS_RECURSOR_VERSION[0])
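# Editor-added example (illustrative): with the CONFIG above,
# _config_sc_tags(CONFIG) evaluates to
#   ['recursor_host:<docker hostname>', 'recursor_port:8082']
# where the port falls back to 8082 unless POWERDNS_HOST_PORT_0 is set.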
| {
"content_hash": "a22e684eb76e2da2939f4d41226da9d9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 28.814814814814813,
"alnum_prop": 0.6542416452442159,
"repo_name": "DataDog/integrations-core",
"id": "c0aa68405633a289e8386ccf101ba80ba6b407d7",
"size": "888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "powerdns_recursor/tests/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import pywaves
import logging
class Asset(object):
def __init__(self, assetId, pywaves=pywaves):
self.pywaves = pywaves
self.assetId='' if assetId == pywaves.DEFAULT_CURRENCY else assetId
self.issuer = self.name = self.description = ''
self.quantity = self.decimals = 0
self.reissuable = False
self.minSponsoredAssetFee = None
if self.assetId=='':
self.quantity=self.pywaves.wrapper('/blockchain/rewards')['totalWavesAmount']
self.decimals=8
else:
self.status()
def __str__(self):
return 'status = %s\n' \
'assetId = %s\n' \
'issuer = %s\n' \
'name = %s\n' \
'description = %s\n' \
'quantity = %d\n' \
'decimals = %d\n' \
'reissuable = %s\n' \
'minSponsoredAssetFee = %s' % (self.status(), self.assetId, self.issuer, self.name, self.description, self.quantity, self.decimals, self.reissuable, self.minSponsoredAssetFee)
__repr__ = __str__
def status(self):
if self.assetId!=pywaves.DEFAULT_CURRENCY:
try:
req = self.pywaves.wrapper('/assets/details/%s' % self.assetId)
if req['assetId'] != None:
self.issuer = req['issuer']
self.quantity = req['quantity']
self.decimals = req['decimals']
self.reissuable = req['reissuable']
self.name = req['name'].encode('ascii', 'ignore')
self.description = req['description'].encode('ascii', 'ignore')
self.minSponsoredAssetFee = req['minSponsoredAssetFee']
return 'Issued'
except:
pass
def isSmart(self):
req = self.pywaves.wrapper('/transactions/info/%s' % self.assetId)
if ('script' in req and req['script']):
return True
else:
return False
class AssetPair(object):
def __init__(self, asset1, asset2, pywaves=pywaves):
self.pywaves = pywaves
self.asset1 = asset1
self.asset2 = asset2
self.a1 = pywaves.DEFAULT_CURRENCY if self.asset1.assetId == '' else self.asset1.assetId
self.a2 = pywaves.DEFAULT_CURRENCY if self.asset2.assetId == '' else self.asset2.assetId
def __str__(self):
return 'asset1 = %s\nasset2 = %s' % (self.asset1.assetId, self.asset2.assetId)
def refresh(self):
self.asset1.status()
self.asset2.status()
def first(self):
if len(self.asset1.assetId) < len(self.asset2.assetId):
return self.asset1
elif self.asset1.assetId < self.asset2.assetId:
return self.asset1
else:
return self.asset2
def second(self):
if len(self.asset1.assetId) < len(self.asset2.assetId):
return self.asset2
if self.asset1.assetId < self.asset2.assetId:
return self.asset2
else:
return self.asset1
def orderbook(self):
req = self.pywaves.wrapper('/matcher/orderbook/%s/%s' % (self.a1, self.a2), host=self.pywaves.MATCHER)
return req
def ticker(self):
return self.pywaves.wrapper('/v0/pairs/%s/%s' % (self.a1, self.a2), host=self.pywaves.DATAFEED)
def last(self):
return str(self.ticker()['data']['lastPrice'])
def open(self):
return str(self.ticker()['data']['firstPrice'])
def high(self):
return str(self.ticker()['data']['high'])
def low(self):
return str(self.ticker()['data']['low'])
def close(self):
return self.last()
def vwap(self):
return str(self.ticker()['data']['weightedAveragePrice'])
def volume(self):
return str(self.ticker()['data']['volume'])
def priceVolume(self):
return str(self.ticker()['data']['quoteVolume'])
def _getAPIData(self, request):
return self.pywaves.wrapper(request, host=self.pywaves.DATAFEED)
def _getMarketData(self, method, params):
return self.pywaves.wrapper('%s/%s/%s/%s' % (method, self.a1, self.a2, params), host=self.pywaves.DATAFEED)
def trades(self, *args):
amountAssetId = ''
priceAssetId = ''
if self.asset2.assetId == '':
amountAssetId = 'WAVES'
else:
amountAssetId = self.asset2.assetId
if self.asset1.assetId == '':
priceAssetId = 'WAVES'
else:
priceAssetId = self.asset1.assetId
if len(args)==1:
limit = args[0]
if limit > 0 and limit <= self.pywaves.MAX_WDF_REQUEST:
#return self._getMarketData('/api/trades/', '%d' % limit)
return self._getAPIData('/v0/transactions/exchange?amountAsset=' + amountAssetId + '&priceAsset=' + priceAssetId + '&limit=' + str(limit))
else:
msg = 'Invalid request. Limit must be >0 and <= 100'
self.pywaves.throw_error(msg)
return logging.error(msg)
elif len(args)==2:
fromTimestamp = args[0]
toTimestamp = args[1]
return self._getAPIData('/v0/transactions/exchange?timeStart=' + str(fromTimestamp) + '&timeEnd=' + str(toTimestamp) + '&amountAsset=' + amountAssetId + '&priceAsset=' + priceAssetId)
#return self._getMarketData('/api/trades', '%d/%d' % (fromTimestamp, toTimestamp))
def candles(self, *args):
if len(args)==2:
timeframe = args[0]
limit = args[1]
if timeframe not in self.pywaves.VALID_TIMEFRAMES:
msg = 'Invalid timeframe'
self.pywaves.throw_error(msg)
return logging.error(msg)
elif limit > 0 and limit <= self.pywaves.MAX_WDF_REQUEST:
return self._getMarketData('/candles', '%d/%d' % (timeframe, limit))
else:
msg = 'Invalid request. Limit must be >0 and <= 100'
self.pywaves.throw_error(msg)
return logging.error(msg)
elif len(args)==3:
timeframe = args[0]
fromTimestamp = args[1]
toTimestamp = args[2]
if timeframe not in self.pywaves.VALID_TIMEFRAMES:
msg = 'Invalid timeframe'
self.pywaves.throw_error(msg)
return logging.error(msg)
else:
return self._getMarketData('/candles', '%d/%d/%d' % (timeframe, fromTimestamp, toTimestamp))
__repr__ = __str__
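# Editor-added usage sketch (illustrative; requires network access to a Waves
# node/matcher configured in pywaves, and the asset id below is a placeholder):
#   import pywaves as pw
#   WAVES = pw.Asset('')                 # empty id means the default currency
#   BTC = pw.Asset('<asset id>')
#   pair = pw.AssetPair(BTC, WAVES)
#   print(pair.orderbook())
#   print(pair.last(), pair.volume())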
| {
"content_hash": "92484d33314bde7872fbc111f555b239",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 195,
"avg_line_length": 37.31073446327684,
"alnum_prop": 0.5557238037552998,
"repo_name": "PyWaves/PyWaves",
"id": "06ca768767efcc07fc3623d36a0d6ff0fa1c8758",
"size": "6604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "191208"
}
],
"symlink_target": ""
} |
import mysql.connector
from model.Clases import Group
from model.Clases import Contact
class DbFixture:
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
self.connection.autocommit = True
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("SELECT id, firstname, middlename, lastname, nickname, title, company, address, "
"home, mobile, work, email, email2, email3, homepage, byear, ayear, address2, "
"phone2, notes FROM addressbook WHERE deprecated=0")
for row in cursor:
(id, firstname, middlename, lastname, nickname, title, company, address,
home, mobile, work, email, email2, email3, homepage, byear, ayear, address2, phone2, notes) = row
list.append(Contact(id=str(id), firstname=firstname, middlename=middlename, lastname=lastname,
nickname=nickname, title=title, company=company,
address=address, home=home, mobile=mobile, work=work,
email=email, email2=email2, email3=email3,
homepage=homepage, byear=byear, ayear=ayear, address2=address2,
phone2=phone2, notes=notes, all_mails_from_homepage=email + email2 + email3,
all_phones_from_homepage=home + mobile + work + phone2))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
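# Editor-added usage sketch (illustrative; connection parameters are placeholders):
#   db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   try:
#       print(len(db.get_group_list()), len(db.get_contact_list()))
#   finally:
#       db.destroy()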
| {
"content_hash": "35ef8556e9d86a24733d8ee85888f165",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 44.22,
"alnum_prop": 0.5866123925825418,
"repo_name": "Bartk0/PythonTraining",
"id": "5a2977a5f3fc6e3c410290e448fdd764df2c6aad",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38404"
}
],
"symlink_target": ""
} |