import glob
import os
import signal
#import resource
import logging
import re
import shutil
import tempfile
import shlex
import fnmatch
import platform
import subprocess
import threading
import traceback
from time import clock
def locate_program(candidatePaths):
for p in candidatePaths:
if os.path.isfile(p) and os.access(p, os.X_OK):
return Executable(p)
if platform.system() == 'Windows':
p = p + '.exe'
if os.path.isfile(p) and os.access(p, os.X_OK):
return Executable(p)
return None
def locate_checktestdata():
defaultPaths = [os.path.join(os.path.dirname(__file__),
'checktestdata/checktestdata'),
os.path.join(os.path.dirname(__file__),
'../support/checktestdata/checktestdata'),
'/usr/lib/problemtools/bin/checktestdata']
return locate_program(defaultPaths)
def locate_viva():
defaultPaths = [os.path.join(os.path.dirname(__file__),
'viva/viva.sh'),
os.path.join(os.path.dirname(__file__),
'../support/viva/viva.sh'),
'/usr/lib/problemtools/bin/viva.sh']
return locate_program(defaultPaths)
class ProgramError(Exception):
pass
class ProgramWarning(Exception):
pass
class Command(object):
"""
Runs subprocess commands in a separate thread with a TIMEOUT option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, basestring):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
start = clock()
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
self.time = clock() - start
except:
self.time = clock() - start
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.time
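# Minimal usage sketch (illustration only, not part of the original module):
# run a command with a timeout and inspect the exit status and wall-clock time.
#   cmd = Command('echo hello')
#   status, elapsed = cmd.run(timeout=5)
#   # cmd.output / cmd.error hold the captured stdout / stderr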
class Runnable:
runtime = 0
def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null', args=None, timelim=1000, logger=None):
runcmd = self.get_runcmd()
if runcmd == []:
if logger != None:
logger.error('Could not figure out how to run %s' % self)
return (-1, 0.0)
if args == None:
args = [] # Damn you Python
status, runtime = self._run_wait(runcmd + args, infile, outfile, errfile, timelim)
self.runtime = max(self.runtime, runtime)
return status, runtime
def _run_wait(self, argv, infile="/dev/null", outfile="/dev/null", errfile="/dev/null", timelim=1000):
logging.debug('run "%s < %s > %s 2> %s"', ' '.join(argv), infile, outfile, errfile)
fin = open(os.devnull if infile == '/dev/null' else infile, 'r')
fout = open(os.devnull if outfile == '/dev/null' else outfile, 'w')
ferr = open(os.devnull if errfile == '/dev/null' else errfile, 'w')
command = Command(argv)
return command.run(timeout=timelim, stdin=fin, stdout=fout, stderr=ferr)
def _setfd(self, fd, filename, flag):
tmpfd = os.open(filename, flag)
os.dup2(tmpfd, fd)
os.close(tmpfd)
class Executable(Runnable):
def __init__(self, path):
self.path = path
self.name = os.path.basename(path)
def __str__(self):
return 'Executable(%s)' % (self.path)
def compile(self):
return True
def get_runcmd(self):
return [self.path]
class ValidationScript(Runnable):
_TYPES = {'.ctd': {'run': locate_checktestdata(),
'input_src': 'stdin',
'compile_exit': 1,
'run_exit': 0},
'.viva': {'run': locate_viva(),
'input_src': 'arg',
'compile_exit': 0,
'run_exit': 0}}
def __str__(self):
return 'ValidationScript(%s)' % (self.path)
def __init__(self, path):
ext = os.path.splitext(path)[1]
if not os.path.isfile(path) or ext not in ValidationScript._TYPES.keys():
raise ProgramWarning('Not a recognized validation script')
self.path = path
self.name = path
self.runcmd = None
self.type = ValidationScript._TYPES[ext]
if self.type['run'] is not None:
self.runcmd = self.type['run'].get_runcmd() + [path]
_compile_result = None
def compile(self):
if self._compile_result is None:
self._compile_result = False
(status, runtime) = self.run(switch_exitcodes=False)
self._compile_result = status == self.type['compile_exit']
return self._compile_result
def run(self, infile='/dev/null', outfile='/dev/null', errfile='/dev/null', args=None, timelim=1000, logger=None, switch_exitcodes=True):
if self.runcmd is None:
raise ProgramError('Could not locate runner for validation script %s' % self.path)
if self.type['input_src'] == 'arg' and infile != '/dev/null':
args = [infile]
(status, runtime) = Runnable.run(self, infile, outfile, errfile, args, timelim, logger)
# This is ugly: it swaps the script's own accept exit status with our accept exit status 42.
if switch_exitcodes:
if status == self.type['run_exit']:
status = 42
elif status == 42:
status = self.type['run_exit']
return (status, runtime)
def get_runcmd(self):
return self.runcmd
class Program(Runnable):
# TODO: make language settings more configurable
_LANGNAME = {
'c': 'C',
'cpp': 'C++',
'csharp': 'C#',
'go': 'Go',
'haskell': 'Haskell',
'java': 'Java',
'objectivec': 'Objective-C',
'prolog': 'Prolog',
'python2': 'Python 2',
'python3': 'Python 3',
'ruby': 'Ruby',
'javascript': 'JavaScript',
'php': 'PHP'
}
_GLOBS = {'c': '*.c',
'cpp': '*.cc *.C *.cpp *.cxx *.c++',
'java': '*.java',
'csharp': '*.cs',
'python2': '*.py',
'python3': '*.py',
'ruby': '*.rb',
'go': '*.go',
'haskell': '*.hs',
'objectivec': '*.m',
'prolog': '*.pl',
'javascript': '*.js',
'php': '*.php',
}
_SHEBANGS = {'python2': r"^#!.*python2\b",
'python3': r"^#!.*python3\b"}
_SHEBANG_DEFAULT = ['python2']
_COMPILE = {
'c': 'gcc -g -O2 -static -std=gnu99 -o "%(exe)s" %(src)s -lm' if platform.system() != 'Darwin' else 'gcc -g -O2 -std=gnu99 -o "%(exe)s" %(src)s -lm',
'cpp': 'g++ -g -O2 -static -std=gnu++11 -o "%(exe)s" %(src)s' if platform.system() != 'Darwin' else 'g++ -g -O2 -std=gnu++11 -o "%(exe)s" %(src)s',
'java': 'javac -d %(path)s %(src)s',
'prolog': 'swipl -O -q -g main -t halt -o "%(exe)s" -c %(src)s',
'csharp': 'csc /optimize+ /out:%(exe)s.exe %(src)s',
'go': 'gccgo -g -static-libgcc -o "%(exe)s" %(src)s',
'haskell': 'ghc -O2 -ferror-spans -threaded -rtsopts -o "%(exe)s" %(src)s',
'dir': 'cd "%(path)s" && ./build',
}
_RUN = {
'c': ['%(exe)s'],
'cpp': ['%(exe)s'],
'java': ['java','-Xmx2048m','-Xss64m', '-cp', '%(path)s', '%(mainclass)s'],
'prolog': ['%(exe)s'],
'python2': ['python','%(mainfile)s'],
'python3': ['python3','%(mainfile)s'],
'ruby': ['ruby','%(mainfile)s'],
'csharp': ['%(exe)s.exe'],
'go': ['%(exe)s'],
'haskell': ['%(exe)s'],
'dir': ['%(path)s/run'],
'javascript': ['js24', '%(mainfile)s'],
'php': ['php','-n','%(mainfile)s'],
}
_RUN_PATH_VARS = ['path', 'mainfile', 'exe']
def check_shebang(self, file):
shebang_line = open(file, 'r').readline()
for (lang,shebang_pattern) in Program._SHEBANGS.iteritems():
if re.search(shebang_pattern, shebang_line):
return lang
return None
def list_files(self, lang):
if lang in ['dir']:
return None
globs = Program._GLOBS[lang].split()
result = []
for (path,dirs,files) in os.walk(self.path):
for f in files:
fullpath = os.path.join(self.path, path, f)
for g in globs:
if fnmatch.fnmatch(fullpath, g):
if lang in Program._SHEBANGS.keys():
sheblang = self.check_shebang(fullpath)
if ((sheblang is None and lang not in Program._SHEBANG_DEFAULT) or
(sheblang is not None and sheblang != lang)):
continue
result.append(fullpath)
break
return result
def guess_language(self):
files = [os.path.join(self.path, f) for f in os.listdir(self.path)]
executables = [os.path.basename(f) for f in files if os.access(f, os.X_OK)]
has_build = 'build' in executables
has_run = 'run' in executables
if has_build and has_run:
return 'dir'
elif has_build:
raise ProgramWarning("Has build script but no run script; I'm confused and won't use this")
elif has_run:
raise ProgramWarning("Has run script but no build script; I'm confused and won't use this")
possible_langs = []
for lang in Program._GLOBS:
if len(self.list_files(lang)) > 0:
possible_langs.append(lang)
if len(possible_langs) == 1:
return possible_langs[0]
if len(possible_langs) > 1:
raise ProgramError('Could not uniquely determine language. Candidates are: %s' % (', '.join(possible_langs)))
raise ProgramWarning('Could not guess any language.')
def add_files(self, srcdir):
for f in os.listdir(srcdir):
src = os.path.join(srcdir, f)
dest = os.path.join(self.path, f)
if os.path.isdir(src):
shutil.copytree(src, dest)
else:
shutil.copy(src, dest)
def __init__(self, path, workdir, includedir=None):
if path[-1] == '/':
path = path[:-1]
self.name = os.path.basename(path)
self.path = os.path.join(workdir, self.name)
if os.path.exists(self.path):
self.path = tempfile.mkdtemp(prefix='%s-' % self.name, dir=workdir)
else:
os.makedirs(self.path)
if os.path.isdir(path):
self.add_files(path)
else:
shutil.copy(path, self.path)
self.lang = self.guess_language()
if includedir is not None:
includedir = os.path.join(includedir, self.lang)
if os.path.isdir(includedir):
self.add_files(includedir)
self.srclist = self.list_files(self.lang)
if self.srclist is not None:
self.src = ' '.join(self.srclist)
mainfiles = [x for x in self.srclist if re.match('^[Mm]ain\.', os.path.basename(x))]
if len(mainfiles) > 1:
raise ProgramError('Multiple possible main-files: %s' % ', '.join(mainfiles))
self.mainfile = mainfiles[0] if len(mainfiles) == 1 else self.srclist[0]
self.mainclass = os.path.splitext(os.path.basename(self.mainfile))[0]
self.exe = os.path.join(self.path, 'run')
_compile_result = None
def compile(self, logger=None):
if self._compile_result is not None:
return self._compile_result
if self.lang not in Program._COMPILE:
self._compile_result = True
return True
compiler = (Program._COMPILE[self.lang]) % self.__dict__
logging.debug('compile: %s', compiler)
out = open(os.devnull, 'wb')
status = subprocess.call(compiler, shell=True, stdout=out, stderr=out)
if status != 0:
if logger is not None:
logger.error('Compiler failed (status %d) when compiling %s\n Command used: %s' % (status, self.name, compiler))
self._compile_result = False
return False
self._compile_result = True
return True
runtime = 0
def get_runcmd(self, cwd=None):
self.compile()
vals = self.__dict__
if cwd is not None:
vals = vals.copy()
for key in Program._RUN_PATH_VARS:
if key in vals:
vals[key] = os.path.relpath(vals[key], cwd)
return map(lambda x: x % vals, Program._RUN[self.lang])
def __str__(self):
return 'Program(%s)' % (self.name)
|
#!/usr/bin/env python
"""
Build script for the shared library providing the C ABI bridge to LLVM.
"""
from __future__ import print_function
from ctypes.util import find_library
import os
import subprocess
import shutil
import sys
import tempfile
here_dir = os.path.abspath(os.path.dirname(__file__))
build_dir = os.path.join(here_dir, 'build')
target_dir = os.path.join(os.path.dirname(here_dir), 'llvmlite', 'binding')
is_64bit = sys.maxsize >= 2**32
def try_cmake(cmake_dir, build_dir, generator):
old_dir = os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['cmake', '-G', generator, cmake_dir])
finally:
os.chdir(old_dir)
def run_llvm_config(llvm_config, args):
cmd = [llvm_config] + args
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
out = out.decode()
err = err.decode()
rc = p.wait()
if rc != 0:
raise RuntimeError("Command %s returned with code %d; stderr follows:\n%s\n"
% (cmd, rc, err))
return out
def find_win32_generator():
"""
Find a suitable cmake "generator" under Windows.
"""
# XXX this assumes we will find a generator that's the same, or
# compatible with, the one which was used to compile LLVM... cmake
# seems a bit lacking here.
cmake_dir = os.path.join(here_dir, 'dummy')
# LLVM 4.0+ needs VS 2015 minimum.
for generator in ['Visual Studio 14 2015']:
if is_64bit:
generator += ' Win64'
build_dir = tempfile.mkdtemp()
print("Trying generator %r" % (generator,))
try:
try_cmake(cmake_dir, build_dir, generator)
except subprocess.CalledProcessError:
continue
else:
# Success
return generator
finally:
shutil.rmtree(build_dir)
raise RuntimeError("No compatible cmake generator installed on this machine")
def main_win32():
generator = find_win32_generator()
config = 'Release'
if not os.path.exists(build_dir):
os.mkdir(build_dir)
# Run configuration step
try_cmake(here_dir, build_dir, generator)
subprocess.check_call(['cmake', '--build', build_dir, '--config', config])
shutil.copy(os.path.join(build_dir, config, 'llvmlite.dll'), target_dir)
def main_posix(kind, library_ext):
os.chdir(here_dir)
# Check availability of llvm-config
llvm_config = os.environ.get('LLVM_CONFIG', 'llvm-config')
print("LLVM version... ", end='')
sys.stdout.flush()
try:
out = subprocess.check_output([llvm_config, '--version'])
except (OSError, subprocess.CalledProcessError):
raise RuntimeError("%s failed executing, please point LLVM_CONFIG "
"to the path for llvm-config" % (llvm_config,))
out = out.decode('latin1')
print(out)
if not out.startswith('6.0.'):
msg = (
"Building llvmlite requires LLVM 5.0.x. Be sure to "
"set LLVM_CONFIG to the right executable path.\n"
"Read the documentation at http://llvmlite.pydata.org/ for more "
"information about building llvmlite.\n"
)
raise RuntimeError(msg)
# Get LLVM information for building
libs = run_llvm_config(llvm_config, "--system-libs --libs all".split())
# Normalize whitespace (trim newlines)
os.environ['LLVM_LIBS'] = ' '.join(libs.split())
cxxflags = run_llvm_config(llvm_config, ["--cxxflags"])
cxxflags = cxxflags.split() + ['-fno-rtti', '-g']
os.environ['LLVM_CXXFLAGS'] = ' '.join(cxxflags)
ldflags = run_llvm_config(llvm_config, ["--ldflags"])
os.environ['LLVM_LDFLAGS'] = ldflags.strip()
# static link libstdc++ for portability
if int(os.environ.get('LLVMLITE_CXX_STATIC_LINK', 0)):
os.environ['CXX_STATIC_LINK'] = "-static-libstdc++"
makefile = "Makefile.%s" % (kind,)
subprocess.check_call(['make', '-f', makefile])
shutil.copy('libllvmlite' + library_ext, target_dir)
def main():
if sys.platform == 'win32':
main_win32()
elif sys.platform.startswith('linux'):
main_posix('linux', '.so')
elif sys.platform.startswith('freebsd'):
main_posix('freebsd', '.so')
elif sys.platform == 'darwin':
main_posix('osx', '.dylib')
else:
raise RuntimeError("unsupported platform: %r" % (sys.platform,))
if __name__ == "__main__":
main()
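# Example invocation (illustration; assumes this script is saved as build.py and
# the llvm-config path is a placeholder):
#   LLVM_CONFIG=/usr/lib/llvm-6.0/bin/llvm-config python build.py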
|
# pylint: disable=no-member
import json
import urllib
import signal
import time
import pycurl
POST_PARAMS = {}
class Client:
def __init__(self, sensor, api_url, api_user, api_password):
self._sensor = sensor
self.conn = None
self.buffer = ''
self.keep_trying = 1
self.api_url = api_url
self.api_user = api_user
self.api_password = api_password
signal.signal(signal.SIGINT, self.handle_ctrl_c)
def handle_ctrl_c(self, signal, frame):
if self._sensor is not None:
self._sensor.logger.info('SIGINT received')
else:
print 'You pressed Ctrl+C!'
self.abort_session()
def abort_session(self):
self.keep_trying = 0
self.conn.close()
def setup_connection(self):
if self.conn:
self.conn.close()
self.buffer = ''
self.headers = ['Accept: application/json', 'Expect:']
self.conn = pycurl.Curl()
self.conn.setopt(pycurl.SSL_VERIFYHOST, False)
self.conn.setopt(pycurl.SSL_VERIFYPEER, False)
self.conn.setopt(pycurl.USERPWD, "%s:%s" % (self.api_user, self.api_password))
self.conn.setopt(pycurl.URL, self.api_url)
self.conn.setopt(pycurl.VERBOSE, 1)
self.conn.setopt(pycurl.WRITEFUNCTION, self.on_receive)
self.conn.setopt(pycurl.NOSIGNAL, 1)
self.conn.setopt(pycurl.NOPROGRESS, 0)
self.conn.setopt(pycurl.PROGRESSFUNCTION, self.on_progress)
self.conn.setopt(pycurl.HTTPHEADER, self.headers)
self.conn.setopt(pycurl.POST, 1)
self.conn.setopt(pycurl.POSTFIELDS, urllib.urlencode(POST_PARAMS))
def on_receive(self, data):
self.buffer += data
if data.endswith('\n') and self.buffer.strip():
# complete message received
event = json.loads(self.buffer)
self.buffer = ''
if self._sensor is not None:
self._sensor.process_event(event)
else:
print event
def on_progress(self, d_total, downloaded, u_total, uploaded):
pass
# print "on_progress called"
def __del__(self):
if self.conn:
self.conn.close()
def start(self):
if self._sensor is not None:
self._sensor.logger.info('pyCurl started.')
backoff_network_error = 0.25
backoff_http_error = 5
backoff_rate_limit = 60
while self.keep_trying == 1:
if self._sensor is not None:
self._sensor.logger.info('keep trying = %i', self.keep_trying)
else:
print "keep trying = %i" % self.keep_trying
self.setup_connection()
try:
self.conn.perform()
except:
# Network error, use linear back off up to 16 seconds
if self.keep_trying == 0:
continue
if self._sensor is not None:
self._sensor.logger.info('Network error: %s', self.conn.errstr())
self._sensor.logger.info('Waiting %s seconds before trying again',
backoff_network_error)
else:
print 'Network error: %s' % self.conn.errstr()
print 'Waiting %s seconds before trying again' % backoff_network_error
time.sleep(backoff_network_error)
backoff_network_error = min(backoff_network_error + 1, 16)
continue
# HTTP Error
sc = self.conn.getinfo(pycurl.HTTP_CODE)
if sc == 420:
# Rate limit, use exponential back off starting with 1 minute, and doubling
if self._sensor is not None:
self._sensor.logger.info('Rate limit, waiting %s seconds', backoff_rate_limit)
else:
print 'Rate limit, waiting %s seconds' % backoff_rate_limit
time.sleep(backoff_rate_limit)
backoff_rate_limit *= 2
elif sc == 401:
# Authentication error
if self._sensor is not None:
self._sensor.logger.info(
'Authentication error, check user/password, waiting %s seconds',
backoff_rate_limit)
else:
print 'Authentication error, waiting %s seconds' % backoff_rate_limit
time.sleep(backoff_rate_limit)
backoff_rate_limit *= 2
elif sc == 404:
# Authorization error
if self._sensor is not None:
self._sensor.logger.info(
'Authorization error, check permissions, waiting %s seconds',
backoff_rate_limit)
else:
print 'Authorization error, waiting %s seconds' % backoff_rate_limit
time.sleep(backoff_rate_limit)
backoff_rate_limit *= 2
else:
# HTTP error, use exponential back off up to 320 seconds
if self._sensor is not None:
self._sensor.logger.info('HTTP error %s, %s', sc, self.conn.errstr())
self._sensor.logger.info('Waiting %s seconds', backoff_http_error)
else:
print 'HTTP error %s, %s' % (sc, self.conn.errstr())
print 'Waiting %s seconds' % backoff_http_error
time.sleep(backoff_http_error)
backoff_http_error = min(backoff_http_error * 2, 320)
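# Minimal usage sketch (illustration only; URL and credentials are placeholders):
#   client = Client(sensor=None, api_url='https://example.com/api/stream',
#                   api_user='user', api_password='secret')
#   client.start()  # blocks, reconnecting with back-off until Ctrl+C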
|
import sys
import time
import numpy as np
from gym import spaces
from ..game.constants import Constants
"""
Implements the base class for a training Agent
"""
class Agent:
def __init__(self) -> None:
"""
Implements an agent opponent
"""
self.team = None
self.match_controller = None
def game_start(self, game):
"""
This function is called at the start of each game. Use this to
reset and initialize per game. Note that self.team may have
been changed since last game. The game map has been created
and starting units placed.
Args:
game ([type]): Game.
"""
pass
def process_turn(self, game, team):
"""
Decides on a set of actions for the current turn.
:param game:
:param team:
:return: Array of actions to perform for this turn.
"""
actions = []
return actions
def pre_turn(self, game, is_first_turn=False):
"""
Called before a turn starts. Allows for modifying the game environment.
Generally only used in kaggle submission opponents.
:param game:
"""
return
def post_turn(self, game, actions):
"""
Called after a turn. Generally only used in kaggle submission opponents.
:param game:
:param actions:
:return: (bool) True if it handled the turn (don't run our game engine)
"""
return False
def turn_heurstics(self, game, is_first_turn):
"""
This is called before observation-based actions are generated, allowing hardcoded
heuristics to control a subset of units. Any unit or city that gets an action from
this callback will not create an observation+action.
Args:
game ([type]): Game in progress
is_first_turn (bool): True if it's the first turn of a game.
"""
return
def get_agent_type(self):
"""
Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.
"""
return Constants.AGENT_TYPE.AGENT
def set_team(self, team):
"""
Sets the team id that this agent is controlling
:param team:
"""
self.team = team
def set_controller(self, match_controller):
"""
"""
self.match_controller = match_controller
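# Minimal custom agent sketch (illustration only, not part of the original module):
#   class IdleAgent(Agent):
#       def process_turn(self, game, team):
#           return []  # take no actions this turn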
class AgentFromReplay(Agent):
"""
Base class for an agent from a specified json replay file.
"""
def __init__(self, replay=None) -> None:
"""
Implements an agent opponent
"""
super().__init__()
self.replay = replay
def get_agent_type(self):
"""
Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.
"""
return Constants.AGENT_TYPE.AGENT
def process_turn(self, game, team):
"""
Decides on a set of actions for the current turn.
:param game:
:param team:
:return: Array of actions to perform for this turn.
"""
actions = []
turn = game.state["turn"]
if self.replay is not None:
acts = self.replay['steps'][turn+1][team]["action"]
acts = [game.action_from_string(a, team) for a in acts]
acts = [a for a in acts if a is not None]
actions.extend(acts)
return actions
class AgentWithModel(Agent):
"""
Base class for a stable_baselines3 reinforcement learning agent.
"""
def __init__(self, mode="train", model=None) -> None:
"""
Implements an agent opponent
"""
super().__init__()
self.action_space = spaces.Discrete(10)
self.observation_space = spaces.Box(low=0, high=1, shape=(10,1), dtype=np.float16)
self.model = model
self.mode = mode
def get_agent_type(self):
"""
Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.
"""
if self.mode == "train":
return Constants.AGENT_TYPE.LEARNING
else:
return Constants.AGENT_TYPE.AGENT
def get_reward(self, game, is_game_finished, is_new_turn, is_game_error):
"""
Returns the reward function for this step of the game. Reward should be a
delta increment to the reward, not the total current reward.
"""
return 0
def get_observation(self, game, unit, city_tile, team, is_new_turn):
"""
Implements getting a observation from the current game for this unit or city
"""
return np.zeros((10,1))
def process_turn(self, game, team):
"""
Decides on a set of actions for the current turn. Not used in training, only inference. Generally
don't modify this part of the code.
Returns: Array of actions to perform.
"""
start_time = time.time()
actions = []
new_turn = True
# Run model inference per unit
units = game.state["teamStates"][team]["units"].values()
for unit in units:
if unit.can_act():
obs = self.get_observation(game, unit, None, unit.team, new_turn)
# IMPORTANT: You can switch to deterministic=True to disable randomness in model inference.
# Generally, I've found the agents sometimes get stuck if they are fully deterministic.
action_code, _states = self.model.predict(obs, deterministic=False)
if action_code is not None:
actions.append(
self.action_code_to_action(action_code, game=game, unit=unit, city_tile=None, team=unit.team))
new_turn = False
# Run model inference per city tile
cities = game.cities.values()
for city in cities:
if city.team == team:
for cell in city.city_cells:
city_tile = cell.city_tile
if city_tile.can_act():
obs = self.get_observation(game, None, city_tile, city.team, new_turn)
# IMPORTANT: You can switch to deterministic=True to disable randomness in model inference.
# Generally, I've found the agents sometimes get stuck if they are fully deterministic.
action_code, _states = self.model.predict(obs, deterministic=False)
if action_code is not None:
actions.append(
self.action_code_to_action(action_code, game=game, unit=None, city_tile=city_tile,
team=city.team))
new_turn = False
time_taken = time.time() - start_time
if time_taken > 0.5: # Warn if larger than 0.5 seconds.
print("WARNING: Inference took %.3f seconds for computing actions. Limit is 1 second." % time_taken,
file=sys.stderr)
return actions
class AgentFromStdInOut(Agent):
"""
Wrapper for an external agent where this agent's commands are coming in through standard input.
"""
def __init__(self) -> None:
"""
Implements an agent opponent
"""
super().__init__()
self.initialized_player = False
self.initialized_map = False
def pre_turn(self, game, is_first_turn=False):
"""
Called before a turn starts. Allows for modifying the game environment.
Generally only used in kaggle submission opponents.
:param game:
"""
# Read StdIn to update game state
# Loosely implements:
# /Lux-AI-Challenge/Lux-Design-2021/blob/master/kits/python/simple/main.py
# AND /kits/python/simple/agent.py agent(observation, configuration)
updates = []
while True:
message = input()
if not self.initialized_player:
team = int(message)
self.set_team((team + 1) % 2)
self.match_controller.set_opponent_team(self, team)
self.initialized_player = True
elif not self.initialized_map:
# Parse the map size update message; it's always the second message of the game
map_info = message.split(" ")
game.configs["width"] = int(map_info[0])
game.configs["height"] = int(map_info[1])
# Use an empty map, because the updates will fill the map out
game.configs["mapType"] = Constants.MAP_TYPES.EMPTY
self.initialized_map = True
else:
updates.append(message)
if message == "D_DONE": # End of turn data marker
break
# Reset the game to the specified state. Don't increment turn counter on first turn of game.
game.reset(updates=updates, increment_turn=not is_first_turn)
def post_turn(self, game, actions) -> bool:
"""
Called after a turn. Generally only used in kaggle submission opponents.
:param game:
:param actions:
:return: (bool) True if it handled the turn (don't run our game engine)
"""
# TODO: Send the list of actions to stdout in the correct format.
messages = []
for action in actions:
messages.append(action.to_message(game))
# Print the messages to the kaggle controller
if len(messages) > 0:
print(",".join(messages))
else:
# Print a new line. This is needed for the main_kaggle_submission.py wrapper to work
print("")
print("D_FINISH")
# True here instructs the controller to not simulate the actions. Instead the kaggle controller will
# run the turn and send back pre-turn map state.
return True
|
import typing
import fastapi
import sqlalchemy.orm
from wod_board import config
from wod_board import exceptions
from wod_board.crud import goal_crud
from wod_board.models import get_db
from wod_board.models import goal
from wod_board.schemas import goal_schemas
from wod_board.schemas import user_schemas
from wod_board.utils import user_utils
router = fastapi.APIRouter(prefix=f"{config.API_URL}/goal", tags=["goal"])
@router.post("/", response_model=goal_schemas.Goal)
async def create_goal(
goal_data: goal_schemas.GoalCreate,
db: sqlalchemy.orm.Session = fastapi.Depends(get_db),
current_user: user_schemas.User = fastapi.Depends(user_utils.get_user_with_token),
) -> goal.Goal:
try:
return goal_crud.create_goal(db, goal_data, current_user.id)
except exceptions.UnknownMovement as error:
raise exceptions.RouterException(error)
except exceptions.UnknownRound as error:
raise exceptions.RouterException(error)
except exceptions.UserIsNotAuthor as error:
raise exceptions.RouterException(error)
@router.get(
"/goals/{round_id}", response_model=typing.List[typing.Optional[goal_schemas.Goal]]
)
async def get_goals_by_round_id(
round_id: int,
db: sqlalchemy.orm.Session = fastapi.Depends(get_db),
) -> typing.List[typing.Optional[goal.Goal]]:
return goal_crud.get_goals_by_round_id(db, round_id)
@router.put("/{goal_id}", response_model=goal_schemas.Goal)
async def update_goal(
goal_data: goal_schemas.GoalCreate,
goal_id: int,
db: sqlalchemy.orm.Session = fastapi.Depends(get_db),
current_user: user_schemas.User = fastapi.Depends(user_utils.get_user_with_token),
) -> goal.Goal:
try:
return goal_crud.update_goal(db, goal_data, goal_id, current_user.id)
except exceptions.UnknownGoal as error:
raise exceptions.RouterException(error)
except exceptions.UnknownMovement as error:
raise exceptions.RouterException(error)
except exceptions.UnknownRound as error:
raise exceptions.RouterException(error)
except exceptions.UserIsNotAuthor as error:
raise exceptions.RouterException(error)
@router.delete("/{goal_id}")
async def delete_goal_by_id(
goal_id: int,
db: sqlalchemy.orm.Session = fastapi.Depends(get_db),
current_user: user_schemas.User = fastapi.Depends(user_utils.get_user_with_token),
) -> typing.Dict[str, str]:
try:
goal_crud.delete_goal_by_id(db, goal_id, current_user.id)
except exceptions.UnknownGoal as error:
raise exceptions.RouterException(error)
except exceptions.UserIsNotAuthor as error:
raise exceptions.RouterException(error)
return {"detail": "Goal successfully deleted"}
|
import ast
from requests import HTTPError
from zoho_subscriptions.client.client import Client
from zoho_subscriptions.subscriptions.addon import Addon
from zoho_subscriptions.subscriptions.customer import Customer
from zoho_subscriptions.subscriptions.hostedpage import HostedPage
from zoho_subscriptions.subscriptions.invoice import Invoice
from zoho_subscriptions.subscriptions.plan import Plan
from django.conf import settings as configuration
class Subscription:
def __init__(self, config=None):
if config is None:
self.client = Client(**configuration.ZOHO_SUBSCRIPTION_CONFIG)
else:
self.client = Client(config)
def plan(self):
return Plan(self.client)
def customer(self):
return Customer(self.client)
def add_on(self):
return Addon(self.client)
def invoice(self):
return Invoice(self.client)
def hosted_page(self):
return HostedPage(self.client)
def get(self, id):
cache_key = "zoho_subscription_%s" % id
response = self.client.get_from_cache(cache_key)
if response is None:
get_subscription_by_id_uri = "subscriptions/%s" % id
response = self.client.send_request("GET", get_subscription_by_id_uri)
self.client.add_to_cache(cache_key, response)
else:
print ("Returning from cache : " + cache_key)
return response
def create(self, data):
return self.client.send_request("POST", 'subscriptions', data=data, headers=None)
def cancel_at_end(self, subscription_id):
return self.client.send_request("POST", 'subscriptions/{0}/cancel?cancel_at_end=true'.format(subscription_id), headers=None)
def buy_add_on(self, subscription_id, data):
buy_add_on_uri = 'subscriptions/%s/buyonetimeaddon' % subscription_id
return self.client.send_request("POST", buy_add_on_uri, data=data, headers=None)
def associate_coupon(self, subscription_id, coupon_code):
coupon_uri = 'subscriptions/%s/coupons/%s' % (subscription_id, coupon_code)
return self.client.send_request("POST", coupon_uri)
def reactivate(self, subscription_id):
reactivate_uri = 'subscriptions/%s/reactivate' % subscription_id
self.client.send_request("POST", reactivate_uri)
def list_subscriptions_by_customer(self, customer_id):
cache_key = "zoho_subscriptions_by_customer_%s" % customer_id
response = self.client.get_from_cache(cache_key)
if response is None:
subscriptions_by_customer_uri = 'subscriptions?customer_id=%s' % customer_id
result = self.client.send_request("GET", subscriptions_by_customer_uri)
if isinstance(result, HTTPError):
raise HTTPError(result)
response = result['subscriptions']
self.client.add_to_cache(cache_key, response)
else:
print("Returning from cache : " + cache_key)
return response
def get_subscriptions(self,subscription_id):
cache_key = "zoho_subscriptions_by_customer_%s" % subscription_id
response = self.client.get_from_cache(cache_key)
if response is None:
subscriptions_by_subscription_id_uri = 'subscriptions/%s'%subscription_id
result = self.client.send_request("GET", subscriptions_by_subscription_id_uri)
if type(result) is HTTPError:
result_bytes = result.response._content
result_dict = ast.literal_eval(result_bytes.decode('utf-8'))
return result_dict['message']
else:
response = result['subscription']
self.client.add_to_cache(cache_key, response)
else:
print("Returning from cache : " + cache_key)
return response
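# Usage sketch (illustration only; the subscription id is a placeholder and the
# client is configured from django settings.ZOHO_SUBSCRIPTION_CONFIG):
#   subscription = Subscription()
#   details = subscription.get('123456789')   # fetched once, then served from cache
#   plans = subscription.plan()                # Plan helper bound to the same client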
|
from .FileInfo import FileInfo
class generic_file(FileInfo):
"""
Class for generic files that do not need much to happen with them
(ie. just view, whatever...)
"""
def __init__(self, id_=None, file=None, parent=None):
super(generic_file, self).__init__(id_, file, parent)
self._type = 'generic'
self.requires_save = False
# any data type in the following list will automatically be assigned
# the .display_raw = True property
# they will also not be flagged as an unknown type
self.types_to_display_raw = ['.txt', '.m', '.py', '', '.xml']
def load_data(self):
pass
@property
def dtype(self):
return self._type
@dtype.setter
def dtype(self, value):
# we will allow setting of type for this particular file type
# as we want it to be able to be set whenever
self._type = value
if value in self.types_to_display_raw:
self.display_raw = True
self.unknown_type = False
else:
self.display_raw = False
self.unknown_type = True
|
"""Django REST Framework views"""
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from flipt_pb2 import ListFlagRequest
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from flipt.clients import client
CACHE_SECONDS = getattr(settings, 'FLIPT_CACHE_SECONDS', 0)
class FeatureFlagListView(APIView):
"""View to retrieve all Flipt flags"""
permission_classes = (AllowAny,)
@method_decorator(cache_page(CACHE_SECONDS))
def get(self, _):
if client is None:
default = getattr(settings, 'FLIPT_FLAG_DEFAULT', True)
return Response({'_default': default})
response = client.ListFlags(ListFlagRequest())
results = {}
for flag in response.flags:
results[flag.key] = flag.enabled
return Response(results)
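# Hypothetical wiring in a project's urls.py (illustration only; the URL path is a placeholder):
#   from django.urls import path
#   urlpatterns = [path("feature-flags/", FeatureFlagListView.as_view())]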
|
import ply.lex as lex
import ply.yacc as yacc
import sys
tokens = ["NUM", "PAL"]
literals = ["[","]", ","]
t_NUM = r"\d+"
t_PAL = r"[a-zA-Z]+"
t_ignore = " \n\t\r"
def t_error(t):
print("Illegal character " + t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# lexical analyser
#for line in sys.stdin:
# lexer.input(line)
# for tok in lexer:
# print(tok)
# syntactic analyser (parser)
#def p_grammar(p):
# """
# lista : '[' conteudo ']'
#
# conteudo :
# | elementos
#
# elementos : elem
# | elem ',' elementos
#
# elem : PAL
# | NUM
# """
# both versions work, but left recursion is more efficient(?)
def p_lista(p):
"lista : '[' conteudo ']'"
def p_conteudo_vazio(p):
"conteudo : "
def p_conteudo_elementos(p):
"conteudo : elementos"
def p_elementos_um(p):
"elementos : elem"
def p_elementos_lista(p):
"elementos : elementos ',' elem"
def p_elem_num(p):
"elem : NUM"
print("Found a number")
p.parser.contador_nums += 1
def p_elem_pal(p):
"elem : PAL"
print("Found a word")
p.parser.contador_pals += 1
def p_elem_lista(p):
"elem : lista"
def p_error(p):
print("Sintax error")
parser = yacc.yacc()
parser.contador_nums = 0
parser.contador_pals = 0
p = 0
# yacc is bottom-up
for line in sys.stdin:
parser.parse(line)
print("Encontrei " + str(parser.contador_pals) + " palavras")
|
#!/usr/bin/env python
#
# notifier.py
#
# Copyright (c) 2014-2015 Junpei Kawamoto
#
# This software is released under the MIT License.
#
# http://opensource.org/licenses/mit-license.php
#
import argparse
import fnmatch
import json
import re
import sys
from docker import Docker
from pushover import Pushover
__APPLICATION__="docker-notifier"
class PushoverNotifier(object):
def __init__(self, user, token):
self._pushover = Pushover(user, token)
def create(self, id, name=None):
pass
def die(self, id, name=None):
if name:
self._pushover.send("Container {0} exited.".format(name))
class StreamNotifier(object):
def __init__(self, output):
self._output = output
def create(self, id, name=None):
self._write(id, name, "create")
def die(self, id, name=None):
self._write(id, name, "die")
def _write(self, id, name, status):
data = {
"posted-by": __APPLICATION__,
"name": name,
"id": id,
"status": status
}
if name:
data["name"] = name
json.dump(data, self._output)
self._output.write("\n")
def main(socket, filter, notifier, **kwargs):
regex = None
if filter:
# fnmatch.translate returns a regex pattern string; compile it so .match() works below
regex = re.compile(fnmatch.translate(filter))
docker = Docker(socket)
push = notifier(**kwargs)
names = {}
for e in docker.events():
if e["status"] == "create":
id = e["id"]
res = docker.inspect(id)
name = res["Name"][1:]
names[id] = name
if not regex or regex.match(name):
push.create(id, name)
if e["status"] == "die":
id = e["id"]
name = names[id] if id in names else None
if not regex or regex.match(name):
push.die(id, name)
if id in names:
del names[id]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--socket", default="/var/run/docker.sock", help="Unix socket file of docker.")
parser.add_argument("--filter", help="Unix style pattern to filter containers.")
subparsers = parser.add_subparsers()
pushover_cmd = subparsers.add_parser("pushover", help="Notify events via Pushover.")
pushover_cmd.add_argument("user", help="User key.")
pushover_cmd.add_argument("token", help="Application key.")
pushover_cmd.set_defaults(notifier=PushoverNotifier)
stream_cmd = subparsers.add_parser("stream", help="Notify events via stdout/file.")
stream_cmd.add_argument("--output", default=sys.stdout, type=argparse.FileType("w"))
stream_cmd.set_defaults(notifier=StreamNotifier)
try:
main(**vars(parser.parse_args()))
except KeyboardInterrupt:
pass
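# Example invocations (illustration; keys, names and paths are placeholders):
#   python notifier.py --filter "web*" stream --output events.log
#   python notifier.py pushover <user-key> <application-token>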
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#=======================================================================================
# Imports
#=======================================================================================
import argparse
import configparser
from lib.base import *
from lib.cappconfig import *
from lib.plugins import *
from lib.configutils import PluginDirPaths
#=======================================================================================
# Configuration
#=======================================================================================
defaults = Defaults()
#=======================================================================================
# Library
#=======================================================================================
#==========================================================
# Exceptions
#==========================================================
#==========================================================
class CappLibUnknown(Exception):
def __init__(self, message):
super().__init__(message)
#==========================================================
# Capp Classes
#==========================================================
#==========================================================
class CappLib(object):
def __init__(self, name):
self.name = name
#==========================================================
class CappFlavor(object):
def __init__(self, name):
self.name = name
"""So, we need to do this:
ConfigSetup needs to take initialization parameters such as directories in its constructor. The getConfig
method needs to be altered subsequently to allow either for overriding these values or complementing them.
That way, we don't have to worry about initialization and we can simply pass a ConfigSetup object
to the plugins, being compatible for all sorts of potential initialization procedures, like databases."""
#==========================================================
class Capps(object):
#=============================
"""A collection of all the capps we manage."""
#=============================
# Bugs:
# - basicConfig has the plugin dirs, and it has them without the hard coded default.
# It shouldn't have them in the first place.
# Will move on for now by getting the data from default, as it's supposed to be.
# Issues:
# - [Aziroshin] I really don't like that the capp configs need to specify the capplib.
# I should probably load the flavor file just to get the capplib name and then pass
# it to the capp lib for proper loading.
def __init__(self, cappConfigDirPath):
self.cappConfigDirPath = cappConfigDirPath
def getAll(self):
allCapps = []
#print("[DEBUG][capps.py:Capps:getAll]", "called. Going to probe for conf file: ", self.cappConfigDirPath, os.listdir(self.cappConfigDirPath))
for configFileName in os.listdir(self.cappConfigDirPath):
configFilePath = os.path.join(self.cappConfigDirPath, configFileName)
#print("[DEBUG][capphandler.py:Capps:getAll]", "Conf file found.")
#print("[DEBUG][capphandler.py:Capps:getAll]", "configFilePath: ", configFilePath)
if configFilePath.rpartition(".")[2] == "conf":
#print("[DEBUG][capphandler.py:Capps:getAll]", "Conf file name ends with .conf.")
basicConfig = BasicCappConfigSetup().getConfig(configFilePaths=[configFilePath])
#print("[DEBUG][capphandler.py:Capps:getAll]", "basicConfig anatomy", basicConfig)
# Get the basic version of the flavor plugin bootstrapped, just enough to load the capplib.
cappFlavorPlugin = CappFlavorPlugin(defaults.pluginDirPaths, basicConfig.cappFlavorName)
cappFlavorPlugin.loadInitial(BasicFlavorConfigSetup())
# Get the capp plugin.
#print("[DEBUG] [capps.py.Capps.getAll]", basicConfig.__dict__, configFilePath)
cappLibPlugin = CappLibPlugin(PluginDirPaths(\
defaults.pluginDirPaths, defaults.pluginDirNames).cappLibs,\
cappFlavorPlugin.flavor.cappLibName)
cappLibPlugin.load()
# Get the flavor plugin in its full configuration.
cappFlavorPlugin.loadMore(cappLibPlugin.module.FlavorConfigSetup())
print("[DEBUG][capps.py.Capps.getAll] flavor config (full):", cappFlavorPlugin.flavor)
# Get the capp handler.
cappLibPlugin.module.Capp(\
configSetup=cappLibPlugin.module.CappConfigSetup(\
configFilePaths=[basicConfig.cappConfigDirPath]),\
flavor=cappFlavorPlugin.flavor)
#print("[DEBUG][capphandler.py:Capps:getAll]", "Name of the chosen capp:", basicConfig.name)
#print("[DEBUG][capphandler.py:Capps:getAll]", "CappFlavor chosen:", cappFlavor.name) |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test for Transformer decoder."""
import argparse
import importlib
import numpy as np
import pytest
import torch
from neural_sp.datasets.token_converter.character import Idx2char
from neural_sp.models.torch_utils import np2tensor
from neural_sp.models.torch_utils import pad_list
ENC_N_UNITS = 16
VOCAB = 10
idx2token = Idx2char('test/decoders/dict.txt')
def make_args(**kwargs):
args = dict(
special_symbols={'blank': 0, 'unk': 1, 'eos': 2, 'pad': 3},
enc_n_units=ENC_N_UNITS,
attn_type='scaled_dot',
n_heads=4,
n_layers=2,
d_model=16,
d_ff=64,
ffn_bottleneck_dim=0,
pe_type='add',
layer_norm_eps=1e-12,
ffn_activation='relu',
vocab=VOCAB,
tie_embedding=False,
dropout=0.1,
dropout_emb=0.1,
dropout_att=0.1,
dropout_layer=0.0,
dropout_head=0.0,
lsm_prob=0.0,
ctc_weight=0.0,
ctc_lsm_prob=0.1,
ctc_fc_list='16_16',
backward=False,
global_weight=1.0,
mtl_per_batch=False,
param_init='xavier_uniform',
mma_chunk_size=4,
mma_n_heads_mono=1,
mma_n_heads_chunk=1,
mma_init_r=-4,
mma_eps=1e-6,
mma_std=1.0,
mma_no_denominator=False,
mma_1dconv=False,
mma_quantity_loss_weight=0.0,
mma_headdiv_loss_weight=0.0,
latency_metric='',
latency_loss_weight=0.0,
mma_first_layer=1,
share_chunkwise_attention=False,
external_lm=None,
lm_fusion='',
)
args.update(kwargs)
return args
@pytest.mark.parametrize(
"args",
[
# head
({'n_heads': 1}),
({'n_heads': 4}),
# positional encoding
({'pe_type': 'none'}),
({'pe_type': '1dconv3L'}),
# activation
({'ffn_activation': 'relu'}),
({'ffn_activation': 'gelu'}),
# ({'ffn_activation': 'glu'}),
({'ffn_activation': 'swish'}),
# MMA
({'attn_type': 'mocha', 'mma_chunk_size': 1, 'mma_n_heads_mono': 1}),
({'attn_type': 'mocha', 'mma_chunk_size': 1, 'mma_n_heads_mono': 4}),
({'attn_type': 'mocha', 'mma_chunk_size': 4, 'mma_n_heads_mono': 1, 'mma_n_heads_chunk': 1}),
({'attn_type': 'mocha', 'mma_chunk_size': 4, 'mma_n_heads_mono': 4, 'mma_n_heads_chunk': 1}),
({'attn_type': 'mocha', 'mma_chunk_size': 4, 'mma_n_heads_mono': 1, 'mma_n_heads_chunk': 4}),
({'attn_type': 'mocha', 'mma_chunk_size': 4, 'mma_n_heads_mono': 4, 'mma_n_heads_chunk': 4}),
({'attn_type': 'mocha', 'mma_chunk_size': 4, 'mma_n_heads_mono': 4, 'mma_n_heads_chunk': 4,
'share_chunkwise_attention': True}),
# MMA + HeadDrop
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 1, 'mma_n_heads_mono': 1}),
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 1, 'mma_n_heads_mono': 4}),
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 4, 'mma_n_heads_mono': 1, 'mma_n_heads_chunk': 1}),
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 4, 'mma_n_heads_mono': 4, 'mma_n_heads_chunk': 1}),
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 4, 'mma_n_heads_mono': 1, 'mma_n_heads_chunk': 4}),
({'attn_type': 'mocha', 'dropout_head': 0.1, 'mma_chunk_size': 4, 'mma_n_heads_mono': 4, 'mma_n_heads_chunk': 4}),
# regularization
({'lsm_prob': 0.1}),
({'dropout_layer': 0.1}),
({'dropout_head': 0.1}),
({'tie_embedding': True}),
# CTC
({'ctc_weight': 0.5}),
({'ctc_weight': 1.0}),
({'ctc_weight': 1.0, 'ctc_lsm_prob': 0.0}),
# forward-backward decoder
({'backward': True}),
({'backward': True, 'ctc_weight': 0.5}),
({'backward': True, 'ctc_weight': 1.0}),
# bottleneck
({'ffn_bottleneck_dim': 16}),
# TransformerLM init
# LM integration
]
)
def test_forward(args):
args = make_args(**args)
batch_size = 4
emax = 40
device = "cpu"
eouts = np.random.randn(batch_size, emax, ENC_N_UNITS).astype(np.float32)
elens = torch.IntTensor([len(x) for x in eouts])
eouts = pad_list([np2tensor(x, device).float() for x in eouts], 0.)
ylens = [4, 5, 3, 7]
ys = [np.random.randint(0, VOCAB, ylen).astype(np.int32) for ylen in ylens]
module = importlib.import_module('neural_sp.models.seq2seq.decoders.transformer')
dec = module.TransformerDecoder(**args)
loss, observation = dec(eouts, elens, ys, task='all')
assert loss.dim() == 1
assert loss.size(0) == 1
assert loss.item() >= 0
assert isinstance(observation, dict)
def make_decode_params(**kwargs):
args = dict(
recog_batch_size=1,
recog_beam_width=1,
recog_ctc_weight=0.0,
recog_lm_weight=0.0,
recog_lm_second_weight=0.0,
recog_lm_bwd_weight=0.0,
recog_cache_embedding=True,
recog_max_len_ratio=1.0,
recog_min_len_ratio=0.2,
recog_length_penalty=0.0,
recog_coverage_penalty=0.0,
recog_coverage_threshold=1.0,
recog_length_norm=False,
recog_eos_threshold=1.5,
recog_asr_state_carry_over=False,
recog_lm_state_carry_over=False,
recog_softmax_smoothing=1.0,
recog_mma_delay_threshold=-1,
nbest=1,
exclude_eos=False,
cache_states=True,
)
args.update(kwargs)
return args
def make_args_rnnlm(**kwargs):
args = dict(
lm_type='lstm',
n_units=16,
n_projs=0,
n_layers=2,
residual=False,
use_glu=False,
n_units_null_context=0,
bottleneck_dim=16,
emb_dim=16,
vocab=VOCAB,
dropout_in=0.1,
dropout_hidden=0.1,
lsm_prob=0.0,
param_init=0.1,
adaptive_softmax=False,
tie_embedding=False,
)
args.update(kwargs)
return argparse.Namespace(**args)
@pytest.mark.parametrize(
"backward, params",
[
# !!! forward
# greedy decoding
(False, {'recog_beam_width': 1}),
(False, {'recog_beam_width': 1, 'cache_states': False}),
(False, {'recog_beam_width': 1, 'exclude_eos': True}),
(False, {'recog_beam_width': 1, 'recog_batch_size': 4}),
# beam search
(False, {'recog_beam_width': 4}),
(False, {'recog_beam_width': 4, 'cache_states': False}),
(False, {'recog_beam_width': 4, 'exclude_eos': True}),
(False, {'recog_beam_width': 4, 'nbest': 2}),
(False, {'recog_beam_width': 4, 'nbest': 4}),
(False, {'recog_beam_width': 4, 'nbest': 4, 'softmax_smoothing': 2.0}),
(False, {'recog_beam_width': 4, 'recog_ctc_weight': 0.1}),
# length penalty
(False, {'recog_length_penalty': 0.1}),
(False, {'recog_length_norm': True}),
# shallow fusion
(False, {'recog_beam_width': 4, 'recog_lm_weight': 0.1}),
(False, {'recog_beam_width': 4, 'recog_lm_weight': 0.1, 'recog_cache_embedding': False}),
# rescoring
(False, {'recog_beam_width': 4, 'recog_lm_second_weight': 0.1}),
(False, {'recog_beam_width': 4, 'recog_lm_bwd_weight': 0.1}),
# !!! backward
# greedy decoding
(True, {'recog_beam_width': 1}),
(True, {'recog_beam_width': 1, 'cache_states': False}),
(True, {'recog_beam_width': 1, 'exclude_eos': True}),
(True, {'recog_beam_width': 1, 'recog_batch_size': 4}),
# beam search
(True, {'recog_beam_width': 4}),
(True, {'recog_beam_width': 4, 'cache_states': False}),
(True, {'recog_beam_width': 4, 'exclude_eos': True}),
(True, {'recog_beam_width': 4, 'nbest': 2}),
(True, {'recog_beam_width': 4, 'nbest': 4}),
(True, {'recog_beam_width': 4, 'nbest': 4, 'softmax_smoothing': 2.0}),
(True, {'recog_beam_width': 4, 'recog_ctc_weight': 0.1}),
]
)
def test_decoding(backward, params):
args = make_args()
params = make_decode_params(**params)
params['backward'] = backward
batch_size = params['recog_batch_size']
emax = 40
device = "cpu"
eouts = np.random.randn(batch_size, emax, ENC_N_UNITS).astype(np.float32)
elens = torch.IntTensor([len(x) for x in eouts])
eouts = pad_list([np2tensor(x, device).float() for x in eouts], 0.)
ctc_log_probs = None
if params['recog_ctc_weight'] > 0:
ctc_logits = torch.FloatTensor(batch_size, emax, VOCAB, device=device)
ctc_log_probs = torch.softmax(ctc_logits, dim=-1)
lm = None
if params['recog_lm_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm = module.RNNLM(args_lm).to(device)
lm_second = None
if params['recog_lm_second_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm_second = module.RNNLM(args_lm).to(device)
lm_second_bwd = None
if params['recog_lm_bwd_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm_second_bwd = module.RNNLM(args_lm).to(device)
ylens = [4, 5, 3, 7]
ys = [np.random.randint(0, VOCAB, ylen).astype(np.int32) for ylen in ylens]
module = importlib.import_module('neural_sp.models.seq2seq.decoders.transformer')
dec = module.TransformerDecoder(**args)
dec = dec.to(device)
# TODO(hirofumi0810):
# recog_lm_state_carry_over
dec.eval()
with torch.no_grad():
if params['recog_beam_width'] == 1:
out = dec.greedy(eouts, elens, max_len_ratio=1.0, idx2token=idx2token,
exclude_eos=params['exclude_eos'],
refs_id=ys, utt_ids=None, speakers=None,
cache_states=params['cache_states'])
assert len(out) == 2
hyps, aws = out
assert isinstance(hyps, list)
assert len(hyps) == batch_size
assert isinstance(aws, list)
assert aws[0].shape == (args['n_heads'] * args['n_layers'], len(hyps[0]), emax)
else:
out = dec.beam_search(eouts, elens, params, idx2token=idx2token,
lm=lm, lm_second=lm_second, lm_second_bwd=lm_second_bwd,
ctc_log_probs=ctc_log_probs,
nbest=params['nbest'], exclude_eos=params['exclude_eos'],
refs_id=None, utt_ids=None, speakers=None,
cache_states=params['cache_states'])
assert len(out) == 3
nbest_hyps, aws, scores = out
assert isinstance(nbest_hyps, list)
assert len(nbest_hyps) == batch_size
assert len(nbest_hyps[0]) == params['nbest']
ymax = len(nbest_hyps[0][0])
assert isinstance(aws, list)
assert aws[0][0].shape == (args['n_heads'] * args['n_layers'], ymax, emax)
assert isinstance(scores, list)
assert len(scores) == batch_size
assert len(scores[0]) == params['nbest']
# ensemble
ensmbl_eouts, ensmbl_elens, ensmbl_decs = [], [], []
for _ in range(3):
ensmbl_eouts += [eouts]
ensmbl_elens += [elens]
ensmbl_decs += [dec]
out = dec.beam_search(eouts, elens, params, idx2token=idx2token,
lm=lm, lm_second=lm_second, lm_second_bwd=lm_second_bwd,
ctc_log_probs=ctc_log_probs,
nbest=params['nbest'], exclude_eos=params['exclude_eos'],
refs_id=None, utt_ids=None, speakers=None,
ensmbl_eouts=ensmbl_eouts, ensmbl_elens=ensmbl_elens, ensmbl_decs=ensmbl_decs,
cache_states=params['cache_states'])
assert len(out) == 3
nbest_hyps, aws, scores = out
assert isinstance(nbest_hyps, list)
assert len(nbest_hyps) == batch_size
assert len(nbest_hyps[0]) == params['nbest']
ymax = len(nbest_hyps[0][0])
assert isinstance(aws, list)
assert aws[0][0].shape == (args['n_heads'] * args['n_layers'], ymax, emax)
assert isinstance(scores, list)
assert len(scores) == batch_size
assert len(scores[0]) == params['nbest']
|
import os
DEFAULT = {
"LOCAL_PATH": os.path.expanduser("~/data/"),
"FEEDSTOCK_PATH": os.path.expanduser("~/feedstock/"),
"SERVICE_DATA": os.path.expanduser("~/integrations/"),
"CURATION_DATA": os.path.expanduser("~/curation/"),
"SCHEMA_PATH": os.path.abspath(os.path.join(os.path.dirname(__file__), "schemas", "schemas")),
"AUX_DATA_PATH": os.path.abspath(os.path.join(os.path.dirname(__file__), "schemas",
"connect_aux_data")),
# Minimum time (in days) to keep test submissions
"TEST_TTL": 30,
"PROCESSOR_WAIT_TIME": 20, # Seconds
"PROCESSOR_SLEEP_TIME": 40, # Seconds
"NUM_EXTRACTORS": 10,
"NUM_SUBMITTERS": 5,
"EXTRACTOR_ERROR_FILE": "extractor_errors.log",
"CANCEL_WAIT_TIME": 60, # Seconds
"TRANSFER_PING_INTERVAL": 20, # Seconds
"TRANSFER_WEB_APP_LINK": "https://app.globus.org/file-manager?origin_id={}&origin_path={}",
"TRANSFER_CANCEL_MSG": ("Your recent MDF Connect submission was cancelled due to a service"
" restart. Please resubmit your dataset. We apologize for the"
" inconvenience."),
"NUM_CURATION_RECORDS": 3,
"SCHEMA_NULLS": ["url"], # Just url from files
"SEARCH_BATCH_SIZE": 100,
"SEARCH_RETRIES": 3,
"SEARCH_PING_TIME": 2, # Seconds
# Fields in the mdf block that cannot be updated with /update
"NO_UPDATE_FIELDS_MDF": ["source_id", "source_name", "scroll_id", "version"],
"DATASET_LANDING_PAGE": "https://petreldata.net/mdf/detail/{}",
"RECORD_LANDING_PAGE": "https://petreldata.net/mdf/detail/{}.{}",
"CITRINATION_LINK": "https://citrination.com/datasets/{cit_ds_id}/",
"MRR_URL": "https://mrr.materialsdatafacility.org/rest/data/",
"MRR_SCHEMA": "5df1452da623810013116d89",
"MRR_LINK": "https://mrr.materialsdatafacility.org/data?id={}",
"API_CLIENT_ID": "c17f27bb-f200-486a-b785-2a25e82af505",
"API_SCOPE": "https://auth.globus.org/scopes/c17f27bb-f200-486a-b785-2a25e82af505/connect",
"API_SCOPE_ID": "mdf_dataset_submission",
"TRANSFER_SCOPE": "urn:globus:auth:scope:transfer.api.globus.org:all",
# Regexes for detecting Globus Web App links
"GLOBUS_LINK_FORMS": [
"^https:\/\/www\.globus\.org\/app\/transfer", # noqa: W605 (invalid escape char '\/')
"^https:\/\/app\.globus\.org\/file-manager", # noqa: W605
"^https:\/\/app\.globus\.org\/transfer", # noqa: W605
"^https:\/\/.*globus.*(?=.*origin_id)(?=.*origin_path)", # noqa: W605
"^https:\/\/.*globus.*(?=.*destination_id)(?=.*destination_path)" # noqa: W605
],
# Using Prod-P GDrive EP because having two GDrive EPs on one account seems to fail
"GDRIVE_EP": "f00dfd6c-edf4-4c8b-a4b1-be6ad92a4fbb",
"GDRIVE_ROOT": "/Shared With Me",
"ADMIN_GROUP_ID": "5fc63928-3752-11e8-9c6f-0e00fd09bf20",
"EXTRACT_GROUP_ID": "cc192dca-3751-11e8-90c1-0a7c735d220a"
}
with open(os.path.join(DEFAULT["SCHEMA_PATH"], "mrr_template.xml")) as f:
DEFAULT["MRR_TEMPLATE"] = f.read()
with open(os.path.join(DEFAULT["SCHEMA_PATH"], "mrr_contributor.xml")) as f:
DEFAULT["MRR_CONTRIBUTOR"] = f.read()
|
"""rio-tiler-crs tile server."""
import logging
import os
from enum import Enum
from typing import Any, Dict, List
import morecantile
import uvicorn
from fastapi import FastAPI, Path, Query
from rasterio.crs import CRS
from starlette.background import BackgroundTask
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.requests import Request
from starlette.responses import Response
from rio_tiler.profiles import img_profiles
from rio_tiler.utils import render
from rio_tiler_crs import COGReader
log = logging.getLogger()
# From developmentseed/titiler
drivers = dict(jpg="JPEG", png="PNG", tif="GTiff", webp="WEBP")
mimetype = dict(
png="image/png",
npy="application/x-binary",
tif="image/tiff",
jpg="image/jpg",
webp="image/webp",
)
WGS84_CRS = CRS.from_epsg(4326)
# CUSTOM TMS for EPSG:3413
extent = (-4194300, -4194300, 4194300, 4194300)
crs = CRS.from_epsg(3413)
EPSG3413 = morecantile.TileMatrixSet.custom(
extent, crs, identifier="EPSG3413", matrix_scale=[2, 2]
)
morecantile.tms.register(EPSG3413)
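# Illustration: once registered, the custom grid can be looked up by identifier, e.g.
#   tms = morecantile.tms.get("EPSG3413")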
class ImageType(str, Enum):
"""Image Type Enums."""
png = "png"
npy = "npy"
tif = "tif"
jpg = "jpg"
webp = "webp"
class XMLResponse(Response):
"""XML Response"""
media_type = "application/xml"
class TileResponse(Response):
"""Tiler's response."""
def __init__(
self,
content: bytes,
media_type: str,
status_code: int = 200,
headers: dict = {},
background: BackgroundTask = None,
ttl: int = 3600,
) -> None:
"""Init tiler response."""
headers.update({"Content-Type": media_type})
if ttl:
headers.update({"Cache-Control": f"max-age={ttl}"})
self.body = self.render(content)
self.status_code = status_code
self.media_type = media_type
self.background = background
self.init_headers(headers)
def ogc_wmts(
endpoint: str,
tms: morecantile.TileMatrixSet,
bounds: List[float] = [-180.0, -90.0, 180.0, 90.0],
minzoom: int = 0,
maxzoom: int = 24,
query_string: str = "",
title: str = "Cloud Optimizied GeoTIFF",
) -> str:
"""
Create WMTS XML template.
Attributes
----------
endpoint : str, required
tiler endpoint.
tms : morecantile.TileMatrixSet
Custom Tile Matrix Set.
bounds : tuple, optional
WGS84 layer bounds (default: [-180.0, -90.0, 180.0, 90.0]).
query_string : str, optional
Endpoint querystring.
minzoom : int, optional (default: 0)
min zoom.
    maxzoom : int, optional (default: 24)
        max zoom.
    title: str, optional (default: "Cloud Optimized GeoTIFF")
Layer title.
Returns
-------
xml : str
OGC Web Map Tile Service (WMTS) XML template.
"""
content_type = "image/png"
layer = tms.identifier
tileMatrixArray = []
for zoom in range(minzoom, maxzoom + 1):
matrix = tms.matrix(zoom)
tm = f"""
<TileMatrix>
<ows:Identifier>{matrix.identifier}</ows:Identifier>
<ScaleDenominator>{matrix.scaleDenominator}</ScaleDenominator>
<TopLeftCorner>{matrix.topLeftCorner[0]} {matrix.topLeftCorner[1]}</TopLeftCorner>
<TileWidth>{matrix.tileWidth}</TileWidth>
<TileHeight>{matrix.tileHeight}</TileHeight>
<MatrixWidth>{matrix.matrixWidth}</MatrixWidth>
<MatrixHeight>{matrix.matrixHeight}</MatrixHeight>
</TileMatrix>"""
tileMatrixArray.append(tm)
tileMatrix = "\n".join(tileMatrixArray)
xml = f"""<Capabilities
xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd"
version="1.0.0">
<ows:ServiceIdentification>
<ows:Title>{title}</ows:Title>
<ows:ServiceType>OGC WMTS</ows:ServiceType>
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
</ows:ServiceIdentification>
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="{endpoint}/{layer}/wmts?{query_string}">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>RESTful</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="{endpoint}/{layer}/wmts?{query_string}">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>RESTful</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<Contents>
<Layer>
<ows:Identifier>{layer}</ows:Identifier>
<ows:WGS84BoundingBox crs="urn:ogc:def:crs:OGC:2:84">
<ows:LowerCorner>{bounds[0]} {bounds[1]}</ows:LowerCorner>
<ows:UpperCorner>{bounds[2]} {bounds[3]}</ows:UpperCorner>
</ows:WGS84BoundingBox>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>{content_type}</Format>
<TileMatrixSetLink>
<TileMatrixSet>{layer}</TileMatrixSet>
</TileMatrixSetLink>
<ResourceURL
format="{content_type}"
resourceType="tile"
template="{endpoint}/tiles/{layer}/{{TileMatrix}}/{{TileCol}}/{{TileRow}}.png?{query_string}"/>
</Layer>
<TileMatrixSet>
<ows:Identifier>{layer}</ows:Identifier>
<ows:SupportedCRS>EPSG:{tms.crs.to_epsg()}</ows:SupportedCRS>
{tileMatrix}
</TileMatrixSet>
</Contents>
<ServiceMetadataURL xlink:href='{endpoint}/{layer}/wmts?{query_string}'/>
</Capabilities>"""
return xml
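

# Hedged usage sketch (added for illustration): calling ogc_wmts() directly for the custom
# EPSG3413 grid. The endpoint URL and filename are assumptions, not values from the app.
def _example_wmts_document():
    """Illustrative only: render a WMTS capabilities document as an XML string."""
    return ogc_wmts(
        "http://127.0.0.1:8501",
        morecantile.tms.get("EPSG3413"),
        query_string="filename=example_cog",
        minzoom=0,
        maxzoom=10,
        title="example_cog",
    )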
app = FastAPI(
title="rio-tiler-crs",
description="A lightweight Cloud Optimized GeoTIFF tile server",
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["GET"],
allow_headers=["*"],
)
app.add_middleware(GZipMiddleware, minimum_size=0)
responses = {
200: {
"content": {
"image/png": {},
"image/jpg": {},
"image/webp": {},
"image/tiff": {},
"application/x-binary": {},
},
"description": "Return an image.",
}
}
tile_routes_params: Dict[str, Any] = dict(
responses=responses, tags=["tiles"], response_class=TileResponse
)
@app.get(r"/tiles/{z}/{x}/{y}\.png", **tile_routes_params)
@app.get(r"/tiles/{identifier}/{z}/{x}/{y}\.png", **tile_routes_params)
@app.get(r"/tiles/{z}/{x}/{y}@{scale}x\.png", **tile_routes_params)
@app.get(r"/tiles/{identifier}/{z}/{x}/{y}@{scale}x\.png", **tile_routes_params)
def _tile(
z: int,
x: int,
y: int,
scale: int = Query(
1, gt=0, lt=4, description="Tile size scale. 1=256x256, 2=512x512..."
),
identifier: str = Query("WebMercatorQuad", title="TMS identifier"),
filename: str = Query(...),
):
"""Handle /tiles requests."""
tms = morecantile.tms.get(identifier)
with COGReader(f"{filename}.tif", tms=tms) as cog: # type: ignore
tile, mask = cog.tile(x, y, z, tilesize=scale * 256)
ext = ImageType.png
driver = drivers[ext.value]
options = img_profiles.get(driver.lower(), {})
img = render(tile, mask, img_format="png", **options)
return TileResponse(img, media_type=mimetype[ext.value])
@app.get(
r"/WMTSCapabilities.xml",
responses={200: {"content": {"application/xml": {}}}},
response_class=XMLResponse,
)
@app.get(
r"/{identifier}/WMTSCapabilities.xml",
responses={200: {"content": {"application/xml": {}}}},
response_class=XMLResponse,
)
def _wmts(
request: Request,
response: Response,
identifier: str = Path("WebMercatorQuad", title="TMS identifier"),
filename: str = Query(...),
):
"""Handle /tiles requests."""
tms = morecantile.tms.get(identifier)
host = request.headers["host"]
scheme = request.url.scheme
endpoint = f"{scheme}://{host}"
with COGReader(f"{filename}.tif", tms=tms) as cog: # type: ignore
meta = cog.info
return XMLResponse(
ogc_wmts(
endpoint,
tms,
bounds=meta["bounds"],
query_string=f"filename={filename}",
minzoom=meta["minzoom"],
maxzoom=meta["maxzoom"],
title=os.path.basename(filename),
)
)
if __name__ == "__main__":
uvicorn.run(app=app, host="0.0.0.0", port=8501, log_level="info")
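# Hedged usage notes (added for illustration; the filename is an assumption): with the
# server above running locally, tiles and capabilities can be fetched with e.g.
#   GET http://127.0.0.1:8501/tiles/EPSG3413/3/2/1.png?filename=example_cog
#   GET http://127.0.0.1:8501/EPSG3413/WMTSCapabilities.xml?filename=example_cog
# where "example_cog" resolves to "example_cog.tif" for the COGReader.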
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 09:03:59 2020
@author: edward
dataset for PyTorch dataloading
"""
import torch
import os, glob, itertools, random
from torch.utils.data import Dataset, DataLoader
import numpy as np
#from tqdm import tqdm
import pprint
pp = pprint.PrettyPrinter(indent=4)
from numba import jit
@jit(nopython=True)
def euclidean_distance(p1,p2):
return np.linalg.norm(p1 - p2)
#return (p1 - p2).norm(2)
@jit(nopython=True)
def compute_local_distances2(positions):
num_nodes = positions.shape[0]
distances = np.ones((num_nodes, num_nodes)) * 1000000
for i in range(num_nodes):
for j in range(num_nodes):
distances[i,j] = euclidean_distance(positions[i,:2], positions[j,:2]) # remove angle
return distances
def compute_local_distances(positions):
return torch.from_numpy(compute_local_distances2(positions.numpy())).float()
def euclidean_distance3(p1,p2):
return (p1 - p2).norm(2)
def compute_local_distances3(positions):
num_nodes = positions.size(0)
distances = torch.ones(num_nodes, num_nodes) * 1000000
for i,j in itertools.product(range(num_nodes), range(num_nodes)):
#if adj_mat[i,j]: # switched of as probabilities are used during training
distances[i,j] = euclidean_distance3(positions[i,:2], positions[j,:2]) # remove angle
return distances
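
# Hedged alternative sketch (added note; not used by the code below): because the loops
# above fill every (i, j) pair, the same x/y distance matrix can be obtained with a
# single vectorized call.
def compute_local_distances_vectorized(positions):
    """Illustrative only: vectorized equivalent of compute_local_distances3."""
    xy = positions[:, :2]  # drop the angle component, as in the loop versions
    return torch.cdist(xy, xy, p=2)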
class GraphDataset(Dataset):
def __init__(self, path, noise_level=0, data_limit=0, min_length=2, normalize=False, weight_mask=False):
print('Loading dataset from path', path)
self.normalize = normalize
self.files = sorted(glob.glob(os.path.join(path, '*.pth')))
if data_limit:
assert data_limit <= len(self.files), (len(self.files), path)
self.files = self.files[:data_limit]
self.noise_level = noise_level
self.graph_data = [torch.load(file) for file in self.files]
self.weight_mask = weight_mask
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
#graph_data = torch.load(self.files[idx])
target = random.choice(range(32)) #
graph_data = self.graph_data[idx]
#target = idx % 32
optimal_actions = graph_data['optimal_actions'][:, target]
features = graph_data['features']
#resnet_features = graph_data['resnet_features']
resnet_features = graph_data.get('resnet_features', None)
weights = graph_data.get('weight_mat', None)
if weights is not None:
weights = torch.min(weights[:, target], torch.ones_like(weights[:, target])*1000.0)
positions = graph_data['positions']
#local_distances = graph_data['local_distance_matrix']
target_node = graph_data['features'][target].unsqueeze(0) # remove target position
target_one_hot = torch.zeros(32)
target_one_hot[target] = 1
local_distances = compute_local_distances(positions) # recompute to remove high values
target_distances = graph_data['global_distance_matrix'][:,target]
if self.normalize:
local_distances = local_distances / local_distances.max()
sample = {'positions': positions,
'features': features ,
'optimal_actions': optimal_actions,
'linkages': graph_data['predicted_adj_mat'],
'ground_linkages': graph_data['ground_adj_mat'],
'target_node': target_node,
'target_one_hot': target_one_hot,
#'target_id': torch.Tensor([target]).long(),
'local_distances': local_distances,
#'path_lengths': graph_data['path_length_mat'],
'target_path_lengths': graph_data['path_length_mat'][target],
'target_distances': target_distances
} # TODO this should not include adjacency
if weights is not None:
if self.weight_mask:
weights[:2] = 0.0
sample['weights'] = weights
if resnet_features is not None:
sample['resnet_features'] = resnet_features.squeeze(0)
return sample
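

# Hedged usage sketch (added for illustration; the path is an assumption): wrapping the
# dataset in a DataLoader, as would typically be done for training.
def example_graph_loader(path='data/graph_data5_distance_weights/train/', batch_size=4):
    """Illustrative only: build a shuffled DataLoader over GraphDataset samples."""
    dataset = GraphDataset(path, normalize=True)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)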
def plot_graph(graph_data):
    # draw vertices
import matplotlib.pyplot as plt
num_nodes = graph_data['positions'].size(0)
positions = graph_data['positions'].numpy()
adj_mat = graph_data['ground_linkages'].numpy()
optimal_actions = graph_data['optimal_actions']
# draw edges
for i,j in itertools.product(range(num_nodes), range(num_nodes)):
if adj_mat[i,j]:
plt.plot([positions[i,0], positions[j,0]],
[positions[i,1], positions[j,1]],alpha=0.1, c='k')
plt.scatter(positions[:,:1],
positions[:,1:2])
start = random.choice(range(num_nodes))
end = np.argmax(graph_data['target_one_hot'].numpy())
plt.scatter(positions[start,0],
positions[start,1], c='g', s=200)
plt.scatter(positions[end,0],
positions[end,1], c='r', s=200)
current = start
while current != end:
next_ind = optimal_actions[current]
plt.plot([positions[current,0], positions[next_ind,0]],
[positions[current,1], positions[next_ind,1]],alpha=0.5, c='m')
current = next_ind
def old():
path = 'data/graph_data3_distance_weights/train/'
for stage in ['train', 'val']:
dataset = GraphDataset(path.replace('train', stage), data_limit=10)
x_min, y_min = 1000.0, 1000.0
x_max, y_max = -1000.0, -1000.0
for i in range(1):
graph_data = dataset[i]
distances = graph_data['local_distances']
x_min = min(x_min, distances.min().item())
x_max = max(x_max, distances.max().item())
print(stage, x_min, x_max)
if __name__ == '__main__':
path = 'data/graph_data5_distance_weights/train/'
stats = {
'train': {},
'val': {}
}
for stage in ['train', 'val']:
dataset = GraphDataset(path.replace('train', stage), data_limit=0, normalize=True)
example = dataset[0]
for key in example.keys():
stats[stage][key + '_min'] = 10000
stats[stage][key + '_max'] = -10000
stats[stage][key + '_min_max'] = 10000
stats[stage][key + '_max_min'] = -10000
num_issues = 0
print('#'*40)
print(stage)
for i in range(len(dataset)):
example = dataset[i]
for key in example.keys():
if torch.any(example[key] != example[key]):
print('nans', key, i, dataset.files[i])
os.remove(dataset.files[i])
num_issues += 1
stats[stage][key + '_min'] = min(stats[stage][key + '_min'], example[key].min().item())
stats[stage][key + '_max'] = max(stats[stage][key + '_max'], example[key].max().item())
                stats[stage][key + '_min_max'] = min(stats[stage][key + '_min_max'], example[key].max().item())
                stats[stage][key + '_max_min'] = max(stats[stage][key + '_max_min'], example[key].min().item())
stats[stage]['num_issues'] = num_issues
pp.pprint(stats)
|
import numpy as np
import torch.utils.data as data
import torch
import os, glob
from six.moves import xrange # pylint: disable=redefined-builtin
import PIL.Image as Image
import random
import cv2
import time
class DatasetReader_fullvid(data.Dataset):
def __init__(self, video_path):
self.video_path = video_path
self.num_frames_per_clip = 16
self.crop_size = 112
self.np_mean = np.load('crop_mean.npy').reshape([self.num_frames_per_clip, self.crop_size, self.crop_size, 3])
def __getitem__(self,index):
clip_frames = []
s_index = index * 8
for idx in range(s_index, s_index + self.num_frames_per_clip):
image_name = self.video_path + '/{:05}.jpg'.format(idx)
#print(image_name)
img = Image.open(image_name)
img_data = np.array(img)
crop_x = int((img_data.shape[0] - self.crop_size)/2)
crop_y = int((img_data.shape[1] - self.crop_size)/2)
img_data = img_data[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size,:] - self.np_mean[idx%8]
clip_frames.append(img_data)
clip_frames = torch.from_numpy( np.array(clip_frames).astype(np.float32).transpose(3,0,1,2) )
return clip_frames
def __len__(self):
num_frames = len(glob.glob(self.video_path + '/*.jpg'))
return int((num_frames-8)/8)
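
# Hedged usage sketch (added for illustration; the frame-folder path is an assumption):
# the reader above yields 16-frame clips with an 8-frame stride, each shaped
# (3, 16, 112, 112) after the transpose in __getitem__.
def example_fullvid_loader(video_path='frames/video_0001', batch_size=1):
    """Illustrative only: iterate center-cropped clips from one frame folder."""
    dataset = DatasetReader_fullvid(video_path)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=False)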
class DatasetReader_random_partialvid(data.Dataset):
def __init__(self, video_path):
# video path is a folder path that contains all video frames
self.video_path = video_path
self.num_frames_per_clip = 16
self.crop_size = 112
self.np_mean = np.load('crop_mean.npy').reshape([self.num_frames_per_clip, self.crop_size, self.crop_size, 3])
# randomly select a partial clip
self.num_frames = len(glob.glob(self.video_path + '/*.jpg'))
for try_ in range(10):
self.random_clip_len = np.random.uniform(low=0.105, high=0.3) * self.num_frames
self.random_clip_len = int(self.random_clip_len / 8) * 8
if self.random_clip_len >= 16:
break
if self.random_clip_len < 16:
self.random_clip_len = 16
self.start = np.random.randint(low=0, high=self.num_frames-self.random_clip_len)
self.end = self.start + self.random_clip_len
# get a clip (16 frames)
def __getitem__(self,index):
clip_frames = []
s_index = index * 8 + self.start
for idx in range(s_index, s_index + self.num_frames_per_clip):
image_name = self.video_path + '/{:05}.jpg'.format(idx)
#print(image_name)
img = Image.open(image_name)
img_data = np.array(img)
crop_x = int((img_data.shape[0] - self.crop_size)/2)
crop_y = int((img_data.shape[1] - self.crop_size)/2)
img_data = img_data[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size,:] - self.np_mean[idx%8]
clip_frames.append(img_data)
clip_frames = torch.from_numpy( np.array(clip_frames).astype(np.float32).transpose(3,0,1,2) )
return clip_frames
def __len__(self):
return int((self.random_clip_len-8)/8)
def get_start_end_frame_id(self):
return self.start, self.end, float(self.random_clip_len)/self.num_frames
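
# Hedged usage sketch (added for illustration; the frame-folder path is an assumption):
# the partial-clip reader picks one random sub-clip per instantiation.
def example_partial_clip_info(video_path='frames/video_0001'):
    """Illustrative only: sample a random partial clip and report where it sits."""
    dataset = DatasetReader_random_partialvid(video_path)
    start, end, fraction = dataset.get_start_end_frame_id()
    return start, end, fraction, len(dataset)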
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_string('model_dir', os.environ.get('TEST_TMPDIR'),
'A temporary directory.')
class TPUEmbeddingCheckpointTest(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingCheckpointTest, self).setUp()
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
self.strategy = tpu_strategy.TPUStrategy(self.resolver)
self.num_rows = self.strategy.num_replicas_in_sync
# These tests use two mid level API objects, initialized with different
# values. These have the same sizes.
with self.strategy.scope():
self.first_mid_level_contents = np.ones((self.num_rows, 4))
self.first_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.first_mid_level = self.build_mid_level(
self.first_mid_level_contents, self.first_mid_level_optimizer)
self.second_mid_level_contents = np.ones((self.num_rows, 4)) * 2
self.second_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.second_mid_level = self.build_mid_level(
self.second_mid_level_contents, self.second_mid_level_optimizer,
initialize_tpu_embedding=False)
self.cpu_mid_level_optimizer = tpu_embedding_v2_utils.SGD(
learning_rate=0.1)
self.cpu_mid_level = self.build_mid_level(
self.second_mid_level_contents, self.cpu_mid_level_optimizer)
def tearDown(self):
tpu_strategy_util.shutdown_tpu_system(self.resolver)
super(TPUEmbeddingCheckpointTest, self).tearDown()
def test_checkpoint_save_retrieves(self):
# Ensure that the variables from the first model are loaded.
self.first_mid_level._load_variables()
self.assertAllClose(
self.first_mid_level_contents,
self.make_checkpoint_and_get_embedding('before_load',
self.first_mid_level),
msg='Checkpoint should contain values from the first api object.')
self.second_mid_level._load_variables()
# When we load the variables from the second mid level API object to the TPU
# we expect that checkpointing the first mid level API object will now
# retrieve the values from the TPU which are now different from the current
# variables in the first mid level.
self.assertAllClose(
self.second_mid_level_contents,
self.make_checkpoint_and_get_embedding('after_load',
self.first_mid_level),
msg='Checkpoint should contain values from the second api object.')
def test_checkpoint_restore_loads(self):
def get_values(mid):
return ops.convert_to_tensor(
mid._variables['table']['parameters'].variables[0])
self.first_mid_level._load_variables()
first_checkpoint = util.Checkpoint(model=self.first_mid_level)
first_checkpoint.save(_get_tmpdir('restore', 'save'))
# Checkpoint now has values from first_mid_level. See first assert in
# test_checkpoint_save_retrieves.
self.second_mid_level._load_variables()
self.assertAllClose(
self.second_mid_level_contents,
get_values(self.second_mid_level),
msg='Second mid level api should contain its initial values.',
)
# We restore the checkpoint of our first model into our second model.
# This should load the first mid level API object onto the TPU.
second_checkpoint = util.Checkpoint(model=self.second_mid_level)
second_checkpoint.restore(_get_tmpdir('restore', 'save-1'))
    # Call retrieve here as a way to check what the TPU contains.
# Calling the retrieve ops directly might make for a cleaner separation of
# test and module, though.
self.second_mid_level._retrieve_variables()
self.assertAllClose(
self.first_mid_level_contents,
get_values(self.second_mid_level),
msg='Second mid level api should have retrieved the first model values.'
)
def test_checkpoint_restore_before_variable_creation(self):
class TestModule(module.Module):
def __init__(self, initializer, rows):
self._initializer = initializer
self._rows = rows
def create_embedding(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self._rows, dim=4, initializer=self._initializer,
combiner='sum', name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
optimizer = tpu_embedding_v2_utils.SGD()
self.tpu_embedding = tpu_embedding_v2.TPUEmbedding(
feature_config, self._rows, optimizer)
# We need to clear the already loaded config provided by setUp method.
tpu_strategy_util.initialize_tpu_system(self.resolver)
with self.strategy.scope():
module1 = TestModule(init_ops_v2.Ones(),
self.strategy.num_replicas_in_sync * 2)
module1.create_embedding()
checkpoint = util.Checkpoint(test_module=module1)
checkpoint.save(_get_tmpdir('restore_before_create', 'save'))
tpu_strategy_util.initialize_tpu_system(self.resolver)
with self.strategy.scope():
module2 = TestModule(init_ops_v2.Zeros(),
self.strategy.num_replicas_in_sync * 2)
checkpoint = util.Checkpoint(test_module=module2)
checkpoint.restore(_get_tmpdir('restore_before_create', 'save-1'))
with self.strategy.scope():
module2.create_embedding()
def get_values(mid):
return mid._variables['table']['parameters'].variables[0].numpy()
self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
# Fetch the values from the TPU to check that they are the same.
module2.tpu_embedding._retrieve_variables()
self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
get_values(module2.tpu_embedding))
def build_mid_level(self, embedding_values, optimizer,
initialize_tpu_embedding=True):
"""Creates an embedding api object initialized to embedding_values."""
initializer = init_ops_v2.Constant(embedding_values)
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self.num_rows, dim=4, initializer=initializer,
combiner='sum', name='table')
feature_config = (tpu_embedding_v2_utils.FeatureConfig(
table=table, name='feature'),)
# batch_size here does not matter as we aren't training in any of these
# tests.
return tpu_embedding_v2.TPUEmbedding(
feature_config, 64, optimizer,
initialize_tpu_embedding=initialize_tpu_embedding)
def make_checkpoint_and_get_embedding(self, name, model):
"""Saves model to checkpoint name, retrieves embedding variables."""
checkpoint = util.Checkpoint(model=model)
checkpoint.save(_get_tmpdir(name, 'save'))
# Get the name of the parameters variable which should be the only
# [self.num_rows, 4] shaped tensor in the checkpoint. Note that we do this
# as the key can change.
variables = checkpoint_utils.list_variables(_get_tmpdir(name))
variables = [name for name, size in variables if size == [self.num_rows, 4]]
if len(variables) != 1:
raise RuntimeError('Found {} copies of the parameter variable in the '
                         'checkpoint. Expected exactly one copy.'.format(
len(variables)))
return checkpoint_utils.load_variable(_get_tmpdir(name), variables[0])
def test_model_export_cpu(self):
self.first_mid_level._load_variables()
tpu_checkpoint = util.Checkpoint(model=self.first_mid_level)
tpu_checkpoint.save(_get_tmpdir('export_cpu', 'save'))
# We restore the checkpoint of our tpu mid level onto our cpu mid level.
cpu_checkpoint = util.Checkpoint(model=self.cpu_mid_level)
cpu_checkpoint.restore(_get_tmpdir('export_cpu', 'save-1'))
@def_function.function
def serve_tensors(features):
features = tpu_embedding_v2.cpu_embedding_lookup(
features, None, self.cpu_mid_level.embedding_tables,
self.cpu_mid_level._feature_config)
return features[0]
signatures = {
'serving_default':
serve_tensors.get_concrete_function(
(tensor_spec.TensorSpec(
shape=(2,), dtype=dtypes.int32, name='feature'),))}
save.save(self.cpu_mid_level,
export_dir=_get_tmpdir('export_cpu', 'exported_model'),
signatures=signatures)
imported = load.load(_get_tmpdir('export_cpu', 'exported_model'))
predict_fn = imported.signatures['serving_default']
input_feature_value = np.array([1, 0])
input_batch = (constant_op.constant(input_feature_value,
dtype=dtypes.int32),)
prediction = predict_fn(*input_batch)['output_0']
self.assertAllClose(prediction.numpy(),
self.first_mid_level_contents[input_feature_value])
@parameterized.parameters(tpu_embedding_v2_utils.SGD,
tpu_embedding_v2_utils.Adagrad,
tpu_embedding_v2_utils.Adam)
def test_check_checkpoint_variable_names_are_same_on_cpu_and_tpu(self,
optimizer):
# Reinitialize the TPU so that we can re-initialize the embeddings with the
# given optimizer.
tpu_strategy_util.initialize_tpu_system(self.resolver)
optimizer = optimizer(learning_rate=0.1)
with self.strategy.scope():
tpu_mid_level = self.build_mid_level(
self.first_mid_level_contents, optimizer)
tpu_checkpoint = util.Checkpoint(model=tpu_mid_level)
tpu_checkpoint.save(_get_tmpdir('save-tpu', 'save'))
tpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-tpu'))
cpu_mid_level = self.build_mid_level(
self.first_mid_level_contents, optimizer)
cpu_checkpoint = util.Checkpoint(model=cpu_mid_level)
cpu_checkpoint.save(_get_tmpdir('save-cpu', 'save'))
cpu_variables = checkpoint_utils.list_variables(_get_tmpdir('save-cpu'))
self.assertAllEqual(tpu_variables, cpu_variables)
class TPUEmbeddingTest(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingTest, self).setUp()
self.embedding_values = np.array(list(range(32)), dtype=np.float64)
self.initializer = init_ops_v2.Constant(self.embedding_values)
# Embedding for video initialized to
# 0 1 2 3
# 4 5 6 7
# ...
self.table_video = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=8,
dim=4,
initializer=self.initializer,
combiner='sum',
name='video')
# Embedding for user initialized to
# 0 1
# 2 3
# 4 5
# 6 7
# ...
self.table_user = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=16,
dim=2,
initializer=self.initializer,
combiner='mean',
name='user')
self.feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends'))
self.batch_size = 2
self.data_batch_size = 4
# One (global) batch of inputs
# sparse tensor for watched:
# row 0: 0
# row 1: 0, 1
# row 2: 0, 1
# row 3: 1
self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],
[2, 0], [2, 1], [3, 0]]
self.feature_watched_values = [0, 0, 1, 0, 1, 1]
self.feature_watched_row_lengths = [1, 2, 2, 1]
# sparse tensor for favorited:
# row 0: 0, 1
# row 1: 1
# row 2: 0
# row 3: 0, 1
self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],
[2, 0], [3, 0], [3, 1]]
self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
self.feature_favorited_row_lengths = [2, 1, 1, 2]
# sparse tensor for friends:
# row 0: 3
# row 1: 0, 1, 2
# row 2: 3
# row 3: 0, 1, 2
self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],
[2, 0], [3, 0], [3, 1], [3, 2]]
self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
self.feature_friends_row_lengths = [1, 3, 1, 3]
self.resolver = None
def tearDown(self):
if self.resolver:
tpu_strategy_util.shutdown_tpu_system(self.resolver)
super(TPUEmbeddingTest, self).tearDown()
def test_tables_with_same_name(self):
with self.assertRaisesRegex(
ValueError, 'Multiple tables with name table found.'):
with self._get_strategy().scope():
tpu_embedding_v2.TPUEmbedding(
(tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
name='table',
vocabulary_size=4,
dim=2,
initializer=self.initializer,),
name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
name='table',
vocabulary_size=4,
dim=2,
initializer=self.initializer),
name='favorited')),
self.batch_size,
tpu_embedding_v2_utils.SGD(learning_rate=0.1))
def test_unsupported_optimizer(self):
with self.assertRaisesRegex(
ValueError, 'is an unsupported optimizer class.'):
with self._get_strategy().scope():
tpu_embedding_v2.TPUEmbedding(
self.feature_config, self.batch_size,
tpu_embedding.AdagradParameters(learning_rate=0.1))
def test_pass_non_tensor_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
@def_function.function
def test_apply():
mid_level_api.apply_gradients((1, 2, 3))
with self.assertRaisesRegex(ValueError, 'Expected Tensor.'):
strategy.run(test_apply)
def test_pass_different_structure_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
@def_function.function
def test_apply():
# This should be a tuple as feature_config is a tuple of 3 configs.
mid_level_api.apply_gradients([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
'The two structures don\'t have the same nested structure.'):
strategy.run(test_apply)
def test_pass_none_to_apply_gradients(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
data = next(iter(strategy.experimental_distribute_dataset(dataset)))
@def_function.function
def embedding_and_set_gradients(data):
mid_level_api.enqueue(data)
def tpu_fn():
results = mid_level_api.dequeue()
mid_level_api.apply_gradients((None, None,
array_ops.ones_like(results[2])))
return results
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
mid_level_api.enqueue(data, training=False)
def tpu_fn():
return mid_level_api.dequeue()
return strategy.run(tpu_fn)
first = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
second = self._get_replica_numpy(embedding_only(data), strategy, 0)
# First two features should be the same as None gradient was applied.
# Third feature had gradient of 1 passed in from each core.
# Each core received the same ids per core and returned the following batch:
# [ row 3, row 0 + row 1 + row 2 ]
# so gradient update was (learning rate = 0.1):
# row 0: -1/3*0.1
# row 1: -1/3*0.1
# row 2: -1/3*0.1
# row 3: -1*0.1
# There is a factor of num_replicas because each replica gave an update.
num_replicas = strategy.num_replicas_in_sync
update = ([[0.0]], [[0.0]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([feature-np.array(up) for feature, up in zip(first, update)])
self.assertAllClose(golden, second)
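  # Worked numbers for `update` above (added note; taking num_replicas == 2 for
  # concreteness): rows 0-2 each move by 2 * 0.1 / 3 and row 3 by 2 * 0.1, so the two
  # activation rows of feature 2 shift by 0.2 and ~0.0667 respectively, which is exactly
  # the third entry of `update` with num_replicas = 2.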
def _get_strategy(self):
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
return tpu_strategy.TPUStrategy(self.resolver)
def test_dequeue_on_cpu(self):
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.dequeue()
def test_enqueue_on_cpu(self):
mid_level_api = self._create_mid_level()
features = {
'watched': sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[2, 2])}
with self.assertRaises(RuntimeError):
mid_level_api.enqueue(features)
def test_apply_gradients_on_cpu(self):
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.enqueue(None)
def test_get_embedding_tables_on_cpu(self):
mid_level_api = self._create_mid_level()
self.assertEqual(
set(mid_level_api.embedding_tables.keys()),
set([self.table_video, self.table_user]))
def test_get_embedding_tables_on_tpu(self):
with self._get_strategy().scope():
mid_level_api = self._create_mid_level()
with self.assertRaises(RuntimeError):
mid_level_api.embedding_tables()
def test_enqueue_weight_for_dense_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy, include_weights=True)
dist = strategy.experimental_distribute_datasets_from_function(input_fn)
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features, weights = next(dist_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(ValueError, 'Weight specified for dense input'):
test_fn()
def test_enqueue_wrong_weight_type_for_sparse_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy, include_weights=True)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
ragged_iter = iter(strategy.experimental_distribute_dataset(ragged))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = next(sparse_iter)
_, weights = next(ragged_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'which does not match type input which is SparseTensor.'):
test_fn()
def test_enqueue_wrong_weight_type_for_ragged_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy, include_weights=True)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
ragged_iter = iter(strategy.experimental_distribute_dataset(ragged))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
_, weights = next(sparse_iter)
features = next(ragged_iter)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'which does not match type input which is RaggedTensor.'):
test_fn()
def test_enqueue_sparse_and_ragged(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
ragged_iter = iter(strategy.experimental_distribute_dataset(ragged))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
sparse_features = next(sparse_iter)
ragged_features = next(ragged_iter)
features = (sparse_features[0], ragged_features[1], sparse_features[2])
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
with self.assertRaisesRegex(
ValueError, 'Found both SparseTensors and RaggedTensors'):
test_fn()
def test_enqueue_incorrect_structure_for_features(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = next(sparse_iter)
features = (features[0],)
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
# The error here is raised from nest.assert_same_structure
with self.assertRaises(ValueError):
test_fn()
def test_enqueue_incorrect_structure_for_weights(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy, include_weights=True)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features, weights = next(sparse_iter)
weights = (weights[0],)
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(step)
# The error here is raised from nest.assert_same_structure
with self.assertRaises(ValueError):
test_fn()
def test_enqueue_ragged_tensor(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_sparse_dataset(strategy)
ragged = self._create_ragged_dataset(strategy)
sparse_iter = iter(strategy.experimental_distribute_dataset(sparse))
ragged_iter = iter(strategy.experimental_distribute_dataset(ragged))
@def_function.function
def test_fn():
def get_activations():
return mid_level_api.dequeue()
sparse_features = next(sparse_iter)
ragged_features = next(ragged_iter)
mid_level_api.enqueue(sparse_features, training=False)
sparse_activations = strategy.run(get_activations)
mid_level_api.enqueue(ragged_features, training=False)
ragged_activations = strategy.run(get_activations)
return sparse_activations, ragged_activations
sparse_activations, ragged_activations = test_fn()
    # Extract per core numpy arrays and check that both sparse and ragged have
# the same results.
sparse0 = self._get_replica_numpy(sparse_activations, strategy, 0)
ragged0 = self._get_replica_numpy(ragged_activations, strategy, 0)
self.assertAllClose(sparse0, ragged0)
@parameterized.parameters(True, False)
def test_enqueue_with_weights(self, ragged):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
weight = 0.5
if ragged:
dataset = self._create_ragged_dataset(strategy, include_weights=True,
weight=weight)
else:
dataset = self._create_sparse_dataset(strategy, include_weights=True,
weight=weight)
dataset_iter = iter(strategy.experimental_distribute_dataset(dataset))
@def_function.function
def enqueue_and_get(features, weights):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(features, weights=weights, training=False)
return strategy.run(get_activations)
features, weights = next(dataset_iter)
# Replace the weight for the second feature by None to test.
weights = (weights[0], None, weights[2])
no_weights_activations = enqueue_and_get(features, weights=None)
weights_activations = enqueue_and_get(features, weights=weights)
    # Extract per core numpy arrays.
no_weights0 = self._get_replica_numpy(no_weights_activations, strategy, 0)
weights0 = self._get_replica_numpy(weights_activations, strategy, 0)
# videos table has sum combiner and users table has mean combiner.
    # i.e. users table lookups aren't affected by the weights as all the weights
# are the same.
# Tuple entry 0 and 1 are the watched and favorited features from the videos
# table and entry 2 is the friends feature from the users table.
# Note that None was passed as a weight for entry 1 so weight should have no
# effect.
weight = (0.5, 1.0, 1.0)
golden = tuple([no_weight * w for no_weight, w in zip(no_weights0, weight)])
self.assertAllClose(golden, weights0)
@parameterized.parameters([True, False])
def test_enqueue_with_outside_compilation(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(dataset))
@def_function.function
def enqueue_with_outside_compilation(data):
def get_activations(features):
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_without_outside_compilation(data):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(data, training=False)
return strategy.run(get_activations)
features = next(dataset_iter)
activations_oc = enqueue_with_outside_compilation(features)
activations = enqueue_without_outside_compilation(features)
    # Extract per core numpy arrays.
activations_oc0 = self._get_replica_numpy(activations_oc, strategy, 0)
activations0 = self._get_replica_numpy(activations, strategy, 0)
self.assertAllClose(activations_oc0, activations0)
@parameterized.parameters([True, False])
def test_enqueue_with_outside_compilation_in_control_flow(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(dataset))
# This is one way to force the enqueue in some control flow. @tf.functions
# aren't inlined in the calling tf.function. An alternative would be to
# place the enqueue in a switch_v2 or something similar.
@def_function.function
def enqueue_fn(features):
mid_level_api.enqueue(features, training=False)
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
enqueue_fn(features)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
RuntimeError,
'does not match graph which contains TPUReplicateContext'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_non_direct_input(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(dataset))
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
# This inserts a mul operation on the TPU to trigger the direct input
# error.
features = (features[0]*2, features[1]*2, features[2]*2)
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
ValueError, 'which does not have the `_tpu_input_identity` attr'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_auto_mode(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(strategy.experimental_distribute_dataset(dataset))
@def_function.function
def enqueue_with_no_gradient_apply(data):
def get_activations(features):
# Note the lack of setting training=False, so training defaults to true
# here even though we don't have apply gradients.
# We detect the correct mode based on which ops exist that share the
# same 'name'.
mid_level_api.enqueue(features, name='call1')
return mid_level_api.dequeue(name='call1')
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_with_gradient_apply(data):
def get_activations(features):
mid_level_api.enqueue(features, name='call2')
activations = mid_level_api.dequeue(name='call2')
# Apply an all ones gradient
gradients = nest.map_structure(array_ops.ones_like, activations)
mid_level_api.apply_gradients(gradients, name='call2')
return activations
return strategy.run(get_activations, args=(data,))
data = next(dataset_iter)
before_gradient_apply = enqueue_with_gradient_apply(data)
after_gradient_apply = enqueue_with_no_gradient_apply(data)
before_gradient_apply0 = self._get_replica_numpy(before_gradient_apply,
strategy, 0)
after_gradient_apply0 = self._get_replica_numpy(after_gradient_apply,
strategy, 0)
num_replicas = strategy.num_replicas_in_sync
# We are passing a gradient of 1 for all lookups, optimizer is SGD with a
# learning rate of 0.1. Feature 0 and 1 are looked up with a sum combiner
# with the following ids:
# Feature 0: [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1: [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# i.e. Row 0 and 1 were looked up 3*num_replicas times over all cores and as
# the gradient is 1, the accumulated gradient is 3*num_replicas for each
# position in row 0 and 1 in table.
#
# See comments in test_pass_none_to_apply_gradients for the update to
# Feature 2 and its table.
    # The *2 factors below are because those activation rows come from samples with
    # 2 lookups vs the 1 lookup in the other row.
update = ([[0.3 * num_replicas], [0.3 * num_replicas * 2]],
[[0.3 * num_replicas * 2], [0.3 * num_replicas]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([before - np.array(up) for before, up in
zip(before_gradient_apply0, update)])
self.assertAllClose(golden, after_gradient_apply0)
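  # Worked numbers for `update` above (added note; taking num_replicas == 2 for
  # concreteness): rows 0 and 1 of the video table each accumulate a gradient of
  # 3 * 2 = 6, so with lr=0.1 every element of those rows drops by 0.6. A sample with
  # one lookup therefore shifts by 0.6 and a sample with two lookups by 1.2, giving
  # ([0.6], [1.2]) for feature 0 and ([1.2], [0.6]) for feature 1; feature 2 follows the
  # same arithmetic as in test_pass_none_to_apply_gradients.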
def _create_strategy_and_mid_level(self, optimizer_name):
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
elif optimizer_name == 'adam':
optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
else:
raise ValueError('optimizer is not recognized: ', optimizer_name)
mid_level_api = self._create_mid_level(optimizer=optimizer)
return strategy, mid_level_api, optimizer
@parameterized.parameters(
*itertools.product(
['sgd', 'adagrad', 'adam'],
[True, False]))
def test_embedding(self, optimizer_name, training):
strategy, mid_level_api, optimizer = (
self._create_strategy_and_mid_level(optimizer_name))
dataset = self._create_sparse_dataset(strategy)
dist = strategy.experimental_distribute_dataset(dataset)
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
"""Create and run computation that returns the embedding activations."""
if not training:
activations = mid_level_api.dequeue()
total_loss = _get_total_loss_tensor(activations)
ret_val = [total_loss] + list(activations)
return ret_val
else:
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
total_loss = _get_total_loss_tensor(activations)
loss_per_replica = total_loss / strategy.num_replicas_in_sync
gradients = tape.gradient(loss_per_replica, activations)
mid_level_api.apply_gradients(gradients)
ret_val = [total_loss] + list(activations)
return ret_val
mid_level_api.enqueue(next(dist_iter), training=training)
result = strategy.run(step)
return result
# Run model.
shard_out_val = test_fn()
# Retrieve TPU weights to CPU.
mid_level_api._retrieve_variables()
# Compute sparse tensors for global batch.
input_data = next(iter(self._create_sparse_dataset(strategy)))
# Check results.
self._check_results(strategy, shard_out_val, training, input_data,
mid_level_api._variables,
optimizer)
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
num_replicas = (
distribution_strategy_context.get_strategy().num_replicas_in_sync)
return tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = (
sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices,
values=self.feature_favorited_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices,
values=self.feature_friends_values,
dense_shape=[self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_ragged_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
ragged_features = (
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_watched_row_lengths,
values=self.feature_watched_values),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_favorited_row_lengths,
values=self.feature_favorited_values),
ragged_tensor.RaggedTensor.from_row_lengths(
row_lengths=self.feature_friends_row_lengths,
values=self.feature_friends_values))
if include_weights:
weights = []
for ragged in ragged_features:
weights.append(ragged.with_values(
array_ops.ones_like(ragged.values, dtype=dtypes.float32) * weight))
ragged_features = (ragged_features, tuple(weights))
dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_dense_input_fn(self, strategy, include_weights=False, weight=0.5):
def input_fn(ctx):
del ctx
features = (
constant_op.constant(self.feature_watched_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_favorited_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_friends_values[-2:],
dtype=dtypes.int32))
if include_weights:
weights = [array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features]
features = (features, tuple(weights))
return dataset_ops.DatasetV2.from_tensors(features).repeat()
return input_fn
def _check_results(self, strategy, shard_out_val, training, input_data,
table_to_variable, optimizer):
num_replicas = strategy.num_replicas_in_sync
# Unpack the values `strategy.run()` returns.
loss = _unpack(strategy, shard_out_val[0])
activation_watched = _unpack(strategy, shard_out_val[1])
activation_favorited = _unpack(strategy, shard_out_val[2])
activation_friends = _unpack(strategy, shard_out_val[3])
# Core 0:
# Calculate the values of embedding activations.
activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])
activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])
# Second row of `activation_friends_gold0` is the mean of the following.
# row 0: 0 1
# row 1: 2 3
# row 2: 4 5
activation_friends_gold0 = np.array([[6, 7], [2, 3]])
loss_gold0 = _compute_loss(activation_watched_gold0,
activation_favorited_gold0,
activation_friends_gold0)
# Add on values from other cores:
# Activations for watched are an alternating sequence of
# activation_watched_gold0 and activation_favorited_gold0.
# For favorited it is the same but in the opposite order.
activation_watched_gold = np.concatenate(
(np.concatenate((np.expand_dims(activation_watched_gold0, axis=0),) *
(num_replicas // 2)),
np.concatenate((np.expand_dims(activation_favorited_gold0, axis=0),) *
(num_replicas // 2))),
axis=1).reshape([self.batch_size * num_replicas, 4])
activation_favorited_gold = np.concatenate(
(activation_watched_gold[self.batch_size:,],
activation_watched_gold[0:self.batch_size,]))
activation_friends_gold = np.concatenate(
(activation_friends_gold0,) * num_replicas)
loss_gold = [loss_gold0] * num_replicas
# Test values.
self.assertAllClose(activation_watched_gold, activation_watched)
self.assertAllClose(activation_favorited_gold, activation_favorited)
self.assertAllClose(activation_friends_gold, activation_friends)
self.assertAllClose(loss_gold, loss)
embedding_table_video_before = np.copy(
np.reshape(self.embedding_values, [8, 4]))
embedding_table_user_before = np.copy(
np.reshape(self.embedding_values, [16, 2]))
global_batch_size = self.batch_size * num_replicas
if training:
gradient_wrt_watched_gold = (2 * activation_watched_gold /
global_batch_size)
gradient_wrt_favorited_gold = (2 * activation_favorited_gold /
global_batch_size)
gradient_wrt_friends_gold = (2 * activation_friends_gold /
global_batch_size)
# Calculate gradients wrt embedding tables.
gradients_wrt_user = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_friends_gold,
embedding_table_user_before, input_data[2].indices.numpy(),
input_data[2].values.numpy(), self.table_user.combiner))
gradients_wrt_video = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_favorited_gold,
embedding_table_video_before, input_data[1].indices.numpy(),
input_data[1].values.numpy(), self.table_video.combiner) +
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_watched_gold,
embedding_table_video_before, input_data[0].indices.numpy(),
input_data[0].values.numpy(), self.table_video.combiner))
self._check_embedding_and_slot_variables(embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable)
def _check_embedding_and_slot_variables(self, embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable):
if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
check_fn = self._check_embedding_and_slot_variables_for_sgd
elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
check_fn = self._check_embedding_and_slot_variables_for_adagrad
elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):
check_fn = self._check_embedding_and_slot_variables_for_adam
else:
raise ValueError('optimizer is not recognized: ', type(optimizer))
check_fn(embedding_table_user_before, gradients_wrt_user,
optimizer, table_to_variable[self.table_user.name])
check_fn(embedding_table_video_before, gradients_wrt_video,
optimizer, table_to_variable[self.table_video.name])
def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,
gradients,
optimizer,
variables):
embedding_table = np.copy(embedding_table_before)
embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)
self.assertAllClose(_get_variable(variables['parameters']).numpy(),
embedding_table)
def _check_embedding_and_slot_variables_for_adagrad(self,
embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
embedding_table -= (
optimizer.learning_rate * np.sum(gradients, axis=0) /
np.sqrt(accumulator))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table)
self.assertAllClose(_get_variable(variable['accumulators']).numpy(),
accumulator)
def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
g = np.sum(gradients, axis=0)
v = g**2 * (1 - optimizer.beta_2)
m = g * (1 - optimizer.beta_1)
epsilon = optimizer.epsilon
# TPU Embeddings don't have the LR decay factor for Adam.
lr_modifier = 1
embedding_table -= (
m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table, rtol=1e-4)
self.assertAllClose(_get_variable(variable['momenta']).numpy(),
m, rtol=1e-4)
self.assertAllClose(_get_variable(variable['velocities']).numpy(),
v, rtol=1e-4)
def _get_replica_numpy(self, structured, strategy, replica_id):
def select_replica(x):
x = strategy.experimental_local_results(x)
if len(x) == 1:
        return x[0].numpy()
return x[replica_id].numpy()
return nest.map_structure(select_replica, structured)
def test_dense_lookup(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
dist = strategy.experimental_distribute_datasets_from_function(input_fn)
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(dist_iter), training=False)
return strategy.run(step)
# Run model.
shard0 = self._get_replica_numpy(test_fn(), strategy, 0)
# embedding_values is a linear list, so we reshape to match the correct
# shape of the corresponding table before performing the lookup.
numpy_videos = np.reshape(self.embedding_values, (8, 4))
numpy_users = np.reshape(self.embedding_values, (16, 2))
golden = ((numpy_videos[self.feature_watched_values[-2:]],
numpy_videos[self.feature_favorited_values[-2:]],
numpy_users[self.feature_friends_values[-2:]]))
self.assertAllClose(shard0, golden)
def test_variable_learning_rate(self):
num_steps = 10
num_steps_float = float(num_steps)
starting_lr = 1.0
ending_lr = 0.5
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
# Create model with Keras.
with strategy.scope():
step_counter = tf_variables.Variable(0.0, dtypes.float32)
def lr_function():
return gen_math_ops.maximum(
ending_lr,
starting_lr + ((ending_lr - starting_lr) * step_counter) /
num_steps_float)
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=lr_function)
table_config = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=num_replicas,
dim=4,
initializer=init_ops_v2.Constant(np.zeros((num_replicas, 4))),
combiner='sum', name='table')
mid_level_api = tpu_embedding_v2.TPUEmbedding(
feature_config={
'feature': tpu_embedding_v2_utils.FeatureConfig(
table=table_config, name='feature')},
batch_size=num_replicas,
optimizer=optimizer)
feature = {'feature': constant_op.constant([0], dtype=dtypes.int32)}
def input_fn(ctx):
del ctx
return dataset_ops.DatasetV2.from_tensors(feature).repeat()
dist = strategy.experimental_distribute_datasets_from_function(input_fn)
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
result = math_ops.reduce_sum(activations['feature'])
loss = result / num_replicas
grads = tape.gradient(loss, activations)
mid_level_api.apply_gradients(grads)
return activations['feature']
mid_level_api.enqueue(next(dist_iter), training=True)
return strategy.run(step)
# Run model.
results = []
for _ in range(num_steps):
result = test_fn()
results.append(_unpack(strategy, result))
step_counter.assign_add(1.0)
# Table is 2 elements wide, per-replica batch size of 1, with id 0.
# Loss for the gradient is the sum of the entries divided by the number of
# replicas. Thus the per replica gradient is 1/#of replicas for row 0 and no
# other updates. The reduced gradient is therefore 1.
# Learning rate schedule over num_steps steps:
# 1.0 0.95 0.9 0.85 0.8 ...
    # Since we use SGD and the gradient is one, the first row of the table is
# [0, 0] [-1.0, -1.0] [-1.95, -1.95] [-2.85, -2.85] ... (the negative
# partial sums of the above).
learning_rates = [starting_lr - (starting_lr - ending_lr) / num_steps * j
for j in range(num_steps)]
cumsum = [sum(learning_rates[0:j]) for j in range(num_steps)]
goldens = [[[-cumsum[i]] * table_config.dim] * num_replicas
for i in range(10)]
self.assertAllClose(results, goldens)
@parameterized.parameters([True, False])
def test_optimizer_with_slot_creation_fn(self, use_tpu):
def slot_creation_fn(table, slot_names):
slots = {}
for slot in slot_names:
slots[slot] = tf_variables.Variable(
name='{}_{}'.format(table.name, slot),
initial_value=functools.partial(
init_ops_v2.Zeros(), shape=table.shape, dtype=dtypes.float32),
trainable=False)
return slots
optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1,
slot_variable_creation_fn=slot_creation_fn)
if use_tpu:
strategy = self._get_strategy()
else:
strategy = distribution_strategy_context.get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
video_accumulator = mid_level._variables['video']['accumulators']
user_accumulator = mid_level._variables['user']['accumulators']
if use_tpu:
# To check the table contents (ensure that it is zero rather than the
      # normal initial accumulator value specified in the optimizer config),
# we need to select the underlying table variable on TPU.
# We only have one shard on Forge.
video_accumulator = video_accumulator.variables[0]
user_accumulator = user_accumulator.variables[0]
self.assertAllClose(video_accumulator.numpy(),
np.zeros((self.table_video.vocabulary_size,
self.table_video.dim)))
self.assertAllClose(user_accumulator.numpy(),
np.zeros((self.table_user.vocabulary_size,
self.table_user.dim)))
def test_optimizer_with_slot_creation_fn_non_partial(self):
def slot_creation_fn(table, slot_names):
slots = {}
for slot in slot_names:
# Note that we don't pass functools.partial here, so on TPU we can't
# extract the shape. We expect the error below.
slots[slot] = tf_variables.Variable(
name='{}_{}'.format(table.name, slot),
initial_value=init_ops_v2.Zeros()(shape=table.shape,
dtype=dtypes.float32),
trainable=False)
return slots
optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1,
slot_variable_creation_fn=slot_creation_fn)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
with self.assertRaisesRegex(ValueError,
'Unable to extract initializer function'):
tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
batch_size=self.batch_size*num_replicas,
optimizer=optimizer)
def test_sequence_embeddings(self):
feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends',
max_sequence_length=3))
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=feature_config,
batch_size=self.batch_size * num_replicas,
optimizer=optimizer)
dataset = self._create_sparse_dataset(strategy)
data = next(iter(strategy.experimental_distribute_dataset(dataset)))
@def_function.function
def embedding_and_set_gradients(data):
def tpu_fn():
activations = mid_level.dequeue()
mid_level.apply_gradients(nest.map_structure(array_ops.ones_like,
activations))
return activations
mid_level.enqueue(data)
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
def tpu_fn():
return mid_level.dequeue()
mid_level.enqueue(data)
return strategy.run(tpu_fn)
# Only check core 0.
before_update = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
after_update = self._get_replica_numpy(embedding_only(data), strategy, 0)
# For videos table, row 0 and row 1 are looked up 3*num_replicas times as
# they occur 3 times per replica (considering the features 0 and 1 which are
# both looked up in the videos table).
# Feature 0 has ids [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1 has ids [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# This means that both rows 0 and 1 get a -0.1*3*num_replicas update
# For users table, each row is looked up twice:
# Feature 2 has ids [3, 0, 1, 2], .. repeated over num_replicas
# This means that we get a -0.1*num_replicas update to the third feature.
# In general this means that after the update, if we lookup feature 0 and 1
# the values will be 0.3*num_replicas lower per entry and for feature 2 they
# will be 0.1*num_replicas lower.
    # The one issue is that these lookups contain padding values.
# For core 0, we get the first 2 elements of the 4 element batch.
# For feature 0, the indices are [[0, 0], [1, 0], [1, 1]] with max sequence
# length of 2, which means that [0, 1] will be 0s.
# For feature 1, the indices are [[0, 0], [0, 1], [1, 0]] with max sequence
# length of 2, which means that [1, 1] will be 0s.
# For feature 2, the indices are [[0, 0], [1, 0], [1, 1], [1, 2]] with max
# sequence length of 3, which means that [0, 1], [0, 2] will be 0s.
# The following masks represent that so that we only apply the above updates
# to the non-padding rows:
masks = (
np.array([[[1], [0]], [[1], [1]]]),
np.array([[[1], [1]], [[1], [0]]]),
np.array([[[1], [0], [0]], [[1], [1], [1]]]))
per_row_update = (0.3 * num_replicas,
0.3 * num_replicas,
0.1 * num_replicas)
golden = tuple([before - update * mask for before, update, mask in
zip(before_update, per_row_update, masks)])
self.assertAllClose(golden, after_update)
def _compute_gradients_wrt_embedding_table(batch_size,
gradient_wrt_activation,
embedding_table,
feature_indices,
feature_values,
combiner,
max_sequence_length=0):
"""Compute gradients wrt embedding_table.
Args:
batch_size: `int`, batch size.
gradient_wrt_activation: `np.array` with shape `batch_size` by
embedding `dimension`.
embedding_table: `np.array` with shape `vocabulary_size` by embedding
`dimension`.
feature_indices: `indices` as used to construct `SparseTensor`.
feature_values: `values` as used to construct `SparseTensor`.
combiner: `String`, 'mean' or 'sum'.
max_sequence_length: If non-zero, a sequence feature with the given length.
Returns:
Gradients wrt `embedding_table`, an `np.array`s with shape
`batch_size` by `vocabulary_size` by
embedding `dimension`.
Raises:
ValueError: if `combiner` is not one of 'mean' or 'sum'.
"""
if combiner not in ('mean', 'sum'):
raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))
grads = []
for i in range(batch_size):
grad = np.zeros_like(embedding_table)
count = 0
for (batch_i, seq_index), vocabulary_id in zip(feature_indices,
feature_values):
if batch_i == i:
count += 1
if max_sequence_length > 0:
if seq_index < max_sequence_length:
grad[vocabulary_id, :] += gradient_wrt_activation[i, seq_index, :]
else:
grad[vocabulary_id, :] += gradient_wrt_activation[i, :]
if combiner == 'mean' and not max_sequence_length:
grad = grad / count
grads.append(grad)
return np.stack(grads)
def _unpack(strategy, per_replica_output):
per_replica_output = strategy.experimental_local_results(per_replica_output)
per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()
return per_replica_output
def _get_total_loss_tensor(activations):
losses = []
for activation in activations:
losses.append(
math_ops.reduce_mean(
math_ops.reduce_sum(
gen_math_ops.squared_difference(activation, 0), 1)))
total_loss = array_ops.expand_dims_v2(sum(losses), 0)
return total_loss
def _compute_loss(activation_watched, activation_favorited, activation_friends):
watched_loss = np.mean(np.sum(activation_watched**2, axis=1))
if len(activation_favorited.shape) == 2:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=1))
else:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=(1, 2)))
if len(activation_friends.shape) == 2:
friends_loss = np.mean(np.sum(activation_friends**2, axis=1))
else:
friends_loss = np.mean(np.sum(activation_friends**2, axis=(1, 2)))
loss = watched_loss + favorited_loss + friends_loss
return loss
def _get_tmpdir(name, subdir=''):
segments = [FLAGS.model_dir, name] + ([subdir] if subdir else [])
return os.path.join(*segments)
def _get_variable(variable):
if isinstance(variable, tpu_embedding_v2.TPUShardedVariable):
return variable.variables[0]
return variable
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-current] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pickle
import psycopg2
import psycopg2.extras
import more_itertools as more
CHUNK_SIZE = 300
def lookup(db_url, all_ids, query, chunk_size=CHUNK_SIZE):
assert all_ids, "Must give ids to lookup"
data = {}
conn = psycopg2.connect(db_url)
for chunk in more.chunked(all_ids, chunk_size):
count = 0
ids = tuple(chunk)
assert sorted(set(ids)) == sorted(ids)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(query, (ids,))
for result in cur:
yield result
count += 1
if count != len(ids):
raise ValueError("Found %i of %i" % (count, len(ids)))
cur.close()
conn.close()
def as_mapping(db_url, data, query, key="id", **kwargs):
mapping = {}
for result in lookup(db_url, data, query, **kwargs):
pid = result[key]
assert pid not in mapping
mapping[pid] = result
return mapping
def write_mapping(db_url, data, query, handle, **kwargs):
values = as_mapping(db_url, data, query, **kwargs)
pickle.dump(values, handle)
def load_mapping(handle):
return pickle.load(handle)
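# Hedged usage sketch (not part of the original module): one way these helpers
# might be wired together. The connection URL, SQL query, and ids below are
# illustrative assumptions only. Note that `lookup` passes a Python tuple as
# the single query parameter, so the query should use `IN %s` style expansion.
if __name__ == '__main__':
    EXAMPLE_DB_URL = 'postgresql://user:password@localhost:5432/example'
    EXAMPLE_QUERY = 'SELECT id, description FROM precomputed WHERE id IN %s'
    example_ids = ['URS0000000001', 'URS0000000002']

    # Stream rows lazily, CHUNK_SIZE ids per query.
    for row in lookup(EXAMPLE_DB_URL, example_ids, EXAMPLE_QUERY):
        print(dict(row))

    # Build an id -> row mapping and round-trip it through pickle.
    with open('mapping.pickle', 'wb') as handle:
        write_mapping(EXAMPLE_DB_URL, example_ids, EXAMPLE_QUERY, handle)
    with open('mapping.pickle', 'rb') as handle:
        print(load_mapping(handle))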
|
from fastapi import APIRouter
from example_app.core.models.output import OutputExample
from example_app.core.models.input import InputExample
router = APIRouter()
@router.get("/example", tags=["example get"])
def example_get():
"""
Say hej!
This will greet you properly
And this path operation will:
* return "hej!"
"""
return {"msg": "Hej!"}
@router.post("/example", response_model=OutputExample, tags=["example post"])
def example_endpoint(inputs: InputExample):
"""
Multiply two values
This will multiply two inputs.
And this path operation will:
* return a*b
"""
return {"a": inputs.a, "b": inputs.b, "result": inputs.a * inputs.b}
|
import asyncio
from predcrash_utils.commons import get_asset_root, get_file_content
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import conda
import os
from logzero import logger as LOGGER
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
import matplotlib as mpl
mpl.rcParams['font.size'] = 10.
mpl.rcParams['font.family'] = 'Comic Sans MS'
mpl.rcParams['axes.labelsize'] = 8.
mpl.rcParams['xtick.labelsize'] = 6.
mpl.rcParams['ytick.labelsize'] = 6.
class Map_france(object):
def __init__(self, x1=-6., x2=10., y1=41., y2=51.5, figsize=(8, 8)):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.figsize = figsize
@staticmethod
async def make_data(datasets:list, years:list):
cfg = await get_asset_root()
list_directory = []
for year in years:
for dataset in datasets:
list_directory.append(f'{dataset}_{year}')
list_df = []
for directory in list_directory:
dir = await get_file_content(cfg, directory)
df = pd.read_csv(dir, encoding='latin1', index_col='Num_Acc')
list_df.append(df)
df_total = pd.concat(list_df)
return df_total
async def plot_data(self, datasets, years, start_date=None, end_date=None, delimitation='gadm36_FRA_3'):
data = await Map_france.make_data(datasets, years)
LOGGER.info(data.head())
lat = data['lat'].values
lat = [i / 100000 for i in lat]
lon = data['long'].values
lon = [i / 100000 for i in lon]
fig = plt.figure(figsize=self.figsize)
m = Basemap(resolution='i', projection='merc', llcrnrlat=self.y1, urcrnrlat=self.y2, llcrnrlon=self.x1, urcrnrlon=self.x2,
lat_ts=(self.x1 + self.x2) / 2)
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.readshapefile(name="France", shapefile="/Users/amarchand/Downloads/gadm36_FRA_shp/{}".format(delimitation))
m.shadedrelief()
m.drawcoastlines(color='gray')
m.drawcountries(color='gray')
m.drawstates(color='gray')
# 2. scatter city data, with color reflecting population
# and size reflecting area
m.scatter(lon, lat, latlon=True,
marker='D', color='m', alpha=0.01)
plt.show()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
g = Map_france()
df = loop.run_until_complete(g.plot_data(['caracteristiques'],[str(i) for i in range(2010,2011)]))
LOGGER.info(df)
|
#pragma error
#pragma repy
fro = open("h:lo","r")
fro.close()
|
from collections import defaultdict, deque
from copy import deepcopy
from random import Random
from string import ascii_lowercase as lowercase
import sys
import traceback
from snakegame.colour import hash_colour
from snakegame import common
class Engine(object):
def __init__(
self,
rows, columns, n_apples,
wrap=True, results=False,
random=None,
*args, **kwargs
):
super(Engine, self).__init__(*args, **kwargs)
if random is None:
random = Random()
self.random = random
self.wrap = wrap
self.bots = {}
self.results = None
if results:
self.results = open('results.csv', 'a+')
self.new_game(rows, columns, n_apples)
def get_random_position(self):
x = self.random.randrange(0, self.columns)
y = self.random.randrange(0, self.rows)
return (x, y)
def replace_random(self, old, new):
for i in range(self.rows * self.columns):
x, y = self.get_random_position()
if self.board[y][x] == old:
self.board[y][x] = new
return x, y
def new_game(self, rows, columns, n_apples):
self.game_ticks = 0
self.game_id = self.random.randint(0, 1000000)
self.letters = list(lowercase)
self.letters.reverse()
self.rows = rows
self.columns = columns
self.messages_by_team = defaultdict(dict)
# make board
self.board = [[common.EMPTY for x in range(columns)] for y in range(rows)]
for i in range(n_apples):
x, y = self.get_random_position()
self.board[y][x] = common.APPLE
def add_bot(self, bot, team=None):
"""
A bot is a callable object, with this method signature:
def bot_callable(
board=[[cell for cell in row] for row in board],
position=(snake_x, snake_y)
):
return random.choice('RULD')
If team is not None, this means you will get a third parameter,
containing messages from the other bots on your team.
"""
letter = self.letters.pop()
name = bot.__name__
colour = hash_colour(name)
position = self.replace_random(common.EMPTY, letter.upper())
if position is None:
raise KeyError("Could not insert snake into the board.")
self.bots[letter] = [bot, colour, deque([position]), team]
return letter
def remove_bot(self, letter):
letter = letter.lower()
time_score = self.game_ticks
for row in self.board:
for x, cell in enumerate(row):
if cell.lower() == letter:
row[x] = common.EMPTY
bot = self.bots[letter]
del self.bots[letter]
if not self.results:
try:
name = bot[0].__name__
print("%s died with %d length at time %d\n%d bots remaining" % (name, len(bot[2]), time_score, len(self.bots)))
if not len(self.bots):
print("Round over. %s wins!\n\n" % name)
except AttributeError:
pass
return
try:
name = bot[0].__name__
except AttributeError:
pass
else:
apple_score = len(bot[2])
self.results.write('%s,%s,%s,%s\n' % \
(self.game_id, name, apple_score, time_score))
self.results.flush()
def update_snakes(self):
self.game_ticks += 1
#_ = deepcopy()
for letter, (bot, colour, path, team) in list(self.bots.items()):
board = deepcopy(self.board)
try:
x, y = path[-1]
if team is None:
d = bot(board, (x, y))
else:
messages = self.messages_by_team[team]
d, message = bot(board, (x, y), messages)
assert isinstance(message, str), \
"Message should be a byte string, not %s (%r)." % (
type(message),
message,
)
messages[letter] = message
d = d.upper()
assert d in common.directions, "Return value should be 'U', 'D', 'L' or 'R'."
# Get new position.
dx, dy = common.directions[d]
nx = x + dx
ny = y + dy
if self.wrap:
ny %= self.rows
nx %= self.columns
else:
if ny < 0 or ny >= self.rows or nx < 0 or nx >= self.columns:
self.remove_bot(letter)
continue
oldcell = self.board[ny][nx]
if common.is_vacant(oldcell):
# Move snake forward.
self.board[ny][nx] = letter.upper()
path.append((nx, ny))
# Make old head into body.
self.board[y][x] = letter.lower()
if oldcell == common.APPLE:
# Add in an apple to compensate.
self.replace_random(common.EMPTY, common.APPLE)
else:
# Remove last part of snake.
ox, oy = path.popleft()
self.board[oy][ox] = common.EMPTY
else:
self.remove_bot(letter)
except:
print("Exception in bot %s (%s):" % (letter.upper(), bot))
print('-'*60)
traceback.print_exc()
print('-'*60)
self.remove_bot(letter)
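# Hedged usage sketch (not part of the original engine): a trivial bot matching
# the callable signature documented in `add_bot`, driven for a few ticks. The
# board size, apple count, and tick count are arbitrary illustration values.
if __name__ == '__main__':
    import random

    def wandering_bot(board, position):
        # Ignore the board entirely and pick a random direction each tick.
        return random.choice('RULD')

    engine = Engine(rows=10, columns=10, n_apples=3)
    engine.add_bot(wandering_bot)
    for _ in range(5):
        engine.update_snakes()
    print(engine.board)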
|
from peewee import *
db = SqliteDatabase('movie.db')
class User(Model):
id = IntegerField(primary_key=True)
gender = CharField()
age = IntegerField()
occupation = IntegerField()
zip_code = CharField()
class Meta:
database = db
def __str__(self):
return 'User {}, gender: {}, age: {}, occupation: {}, zip_code: {}'.format(self.id, self.gender, self.age,
self.occupation, self.zip_code)
class Movie(Model):
id = IntegerField(primary_key=True)
title = CharField()
genres = CharField()
class Meta:
database = db
def __str__(self):
return 'Movie {}, title: {}, genres: {}'.format(self.id, self.title, self.genres)
class Rating(Model):
user_id = IntegerField(index=True)
movie_id = IntegerField(index=True)
rating = IntegerField()
timestamp = IntegerField()
class Meta:
database = db
def __str__(self):
return 'Rating, user id {}, movie id: {}, rating: {}, timestamp: {}'.format(self.user_id, self.movie_id,
self.rating,
self.timestamp)
class AverageRating(Model):
movie_id = IntegerField(primary_key=True)
rating = FloatField()
class Meta:
database = db
def __str__(self):
return 'Average Rating, movie id: {}, rating {}'.format(self.movie_id, self.rating)
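# Hedged usage sketch (not part of the original models module): connecting to
# the SQLite file and running a couple of queries. Whether movie.db already
# contains data is an assumption made here for illustration.
if __name__ == '__main__':
    db.connect()
    # Create the tables if they do not exist yet; harmless on an existing db.
    db.create_tables([User, Movie, Rating, AverageRating], safe=True)

    # Print the ten best-rated movies according to AverageRating.
    top = (AverageRating
           .select()
           .order_by(AverageRating.rating.desc())
           .limit(10))
    for avg in top:
        movie = Movie.get_or_none(Movie.id == avg.movie_id)
        print(avg, '-', movie)

    db.close()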
|
white = (255,255,255)
blue = (0, 0, 255)
red = (255, 0, 0)
green = (0, 255, 0)
orange = (255, 165, 0)
yellow = (255, 255, 0)
cyan = (0, 255, 255)
magenta = (255, 0, 255)
brown = (165, 42, 42)
grey = (190, 190, 190)
color_count = 10
max_color_index = 9
min_color_index = 0
color_presets = [white, red, green, blue, orange, grey, yellow, cyan, magenta, brown]
color_index = 0
color = color_presets[0]
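# Hedged helper sketch (not part of the original constants): one plausible way
# the index bounds above could be used to step through the presets, wrapping at
# the ends. This is an illustrative assumption about the intended usage.
def next_color(index):
    """Return (new_index, color) for the preset after `index`, wrapping."""
    new_index = index + 1
    if new_index > max_color_index:
        new_index = min_color_index
    return new_index, color_presets[new_index]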
|
from lxml.html.clean import Cleaner
class SafeHTML(object):
def __init__(self):
self.cleaner = Cleaner(
scripts=True,
javascript=True,
style=True,
page_structure=True,
annoying_tags=True,
remove_unknown_tags=True)
def render(self, src):
return self.cleaner.clean_html(src)
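# Hedged usage sketch (not part of the original class): cleaning a small
# snippet. The input HTML below is an arbitrary illustration.
if __name__ == '__main__':
    dirty = '<div onclick="x()"><script>alert(1)</script><p>hello</p></div>'
    print(SafeHTML().render(dirty))
    # The script tag and javascript: handler are stripped; the paragraph survives.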
|
# from .mobilenet_model_im import mobilenet
from .mobilenetv2 import MobileNet_V2
from .mobilenetv3 import MobileNet_V3
|
import time
import grove_i2c_color_sensor
# Open connection to sensor
color_sensor = grove_i2c_color_sensor.GroveI2CColorSensor()
# Perform continuous integration with predefined duration of 100ms
color_sensor.use_continuous_integration(100)
# Set gain to 16x
color_sensor.set_gain_and_prescaler(16)
# Start integration
color_sensor.start_integration()
time.sleep(.1)
if color_sensor.is_integration_complete():
print ("Continuous integration complete. Read color:")
color = color_sensor.read_rgbc()
print("RGB: {},{},{} - Clear {}".format(color[0], color[1], color[2], color[3]))
color = color_sensor.read_xy()
print("xy: {},{}".format(color[0], color[1]))
color = color_sensor.read_color_name()
print("Closest color match: {}".format(color))
else:
print("Continuous integration incomplete")
# Stop integration before changing settings
color_sensor.stop_integration()
# Perform manual integration
color_sensor.use_manual_integration()
# Set gain to 4x
color_sensor.set_gain_and_prescaler(4)
# Integrate during 200ms
color_sensor.start_integration()
time.sleep(0.2)
color_sensor.stop_integration()
if color_sensor.is_integration_complete():
print ("Manual integration complete. Read color:")
color = color_sensor.read_rgbc()
print("RGB: {},{},{} - Clear {}".format(color[0], color[1], color[2], color[3]))
color = color_sensor.read_xy()
print("xy: {},{}".format(color[0], color[1]))
color = color_sensor.read_color_name()
print("Closest color match: {}".format(color))
else:
print("Manual integration incomplete")
|
import os
import numpy as np
import shutil
from skimage import data, color, exposure, io
from skimage.feature import hog
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,accuracy_score
from skimage.transform import rescale, resize
# The following function creates directories (train, val, test) for each class
def diractory_creation(root_dir = './Cyclone_Wildfire_Flood_Earthquake_Database', class_name='Cyclone'):
os.makedirs(root_dir +'/train/'+class_name)
os.makedirs(root_dir +'/val/'+class_name)
os.makedirs(root_dir +'/test/'+class_name)
# The following function splits the data into (train, val, test) sets for each class
def create_partition(root_dir='Cyclone_Wildfire_Flood_Earthquake_Database',currentCls = 'Cyclone',final_dir='.'):
src = root_dir +"/"+ currentCls # Folder to copy images from
#print (src)
allFileNames = os.listdir(src)
#print (allFileNames)
np.random.shuffle(allFileNames)
train_FileNames, val_FileNames, test_FileNames = np.split(np.array(allFileNames),[int(len(allFileNames)*0.7), int(len(allFileNames)*0.85)])
train_FileNames = [src+'/'+ name for name in train_FileNames.tolist()]
val_FileNames = [src+'/' + name for name in val_FileNames.tolist()]
test_FileNames = [src+'/' + name for name in test_FileNames.tolist()]
print('Total images=> ', len(allFileNames))
print('Training=> ', len(train_FileNames))
print('Validation=> ', len(val_FileNames))
print('Testing=> ', len(test_FileNames))
# Copy-pasting images
print ('for class ', currentCls)
unique = 0
for name in train_FileNames:
new_name=final_dir+"/train/"+currentCls+"/"+str(unique)+".PNG"
unique=unique+1
img = io.imread(name, as_gray=False)
roi = resize(img,(100,100),anti_aliasing=True)
io.imsave(new_name, roi)
unique=0
for name in val_FileNames:
new_name=final_dir+"/val/"+currentCls+"/"+str(unique)+".PNG"
unique=unique+1
img = io.imread(name, as_gray=False)
roi = resize(img,(100,100),anti_aliasing=True)
io.imsave(new_name, roi)
unique=0
for name in test_FileNames:
new_name=final_dir+"/test/"+currentCls+"/"+str(unique)+".PNG"
unique=unique+1
img = io.imread(name, as_gray=False)
roi = resize(img,(100,100),anti_aliasing=True)
io.imsave(new_name, roi)
diractory_creation('.','Cyclone')
diractory_creation('.','Earthquake')
diractory_creation('.','Flood')
diractory_creation('.','Wildfire')
create_partition('Cyclone_Wildfire_Flood_Earthquake_Database','Cyclone',".")
create_partition('Cyclone_Wildfire_Flood_Earthquake_Database','Earthquake',".")
create_partition('Cyclone_Wildfire_Flood_Earthquake_Database','Flood',".")
create_partition('Cyclone_Wildfire_Flood_Earthquake_Database','Wildfire',".") |
# Imports
import os
from ..utils.blender import add_group
from .Constraint import ObConstraint, CamConstraint
from .MappedClass import MappedClass
# Should be moved, along with test below
#from .Object import Object
#from .Camera import Camera
#from .Scene import Scene
#from .Sky import Sky
try:
import bpy
import mathutils as bmu
is_blender = True
except ImportError:
is_blender = False
class Background(MappedClass):
"""Backgrounds for scenes"""
def __init__(self, name='DummyBackground', fname=None, n_vertices=None, n_faces=None,
type='Background', wordnet_label=None, real_world_size=None, lens=50.,
semantic_category=None, object_semantic_category='all', sky_semantic_category='all',
camera_constraints=None, object_constraints=None, obstacles=None,
_id=None, _rev=None, dbi=None):
"""Class to store Backgrounds (floor, walls, maybe lights, + constraints on objects)
A Background consists of a floor, background objects (walls, maybe trees, etc), maybe
lights, and constraints that partly determine the locations of objects, actions*,
and cameras in a scene. Each Background is stored as a group in a .blend file.
        All elements of the background (floor, walls, buildings, empty objects defining
bounds of space, what have you) should be in this group.
Parameters
----------
name: string
a unique identifier for the BG in question. Either a string
(interpreted to be the name of the BG group) or a lambda function
(See bvpLibrary "getSceneComponent" function)
"""
# Quick setting of attributes
inpt = locals()
self.type = 'Background'
for k, v in inpt.items():
            if k not in ('self', 'type'):
if v == 'None':
setattr(self, k, None)
else:
setattr(self, k, v)
if isinstance(self.real_world_size, (list, tuple)):
self.real_world_size = self.real_world_size[0]
self._temp_fields = []
self._data_fields = []
self._db_fields = []
def place(self, scn=None):
"""
Adds background to Blender scene
"""
# Make file local, if it isn't already
if self.path is not None:
#
self.cloud_download()
if not scn:
scn = bpy.context.scene # Get current scene if input not supplied
        if self.name != 'DummyBackground':
# Add group of mesh object(s)
print('{}, {}'.format(self.path, self.name))
add_group(self.name, self.fname, self.path)
else:
# Potentially add default background (with ground plane, other render settings...)
print("BG is empty!")
@classmethod
def from_blender(cls, context, dbi):
"""Create an Action from a selected object in Blender.
This function only works within an active Blender session. The selected object must be an armature with
an action linked to it.
Parameters
----------
context : bpy context
context for determining selected object, etc
"""
raise Exception("Still WIP!")
# Idiot-proofing
assert is_blender, "from_blender() only works within an active blender session."
# Get relevant blender objects
wm = context.window_manager
scn = context.scene
ob = context.object
bvpu.blender.grab_only(ob)
# Compute parameters
## GET GROUP, USE WORDNET LABELS FOR GROUP
grp = 0 # FIX ME
## WordNet labels
wordnet_label = [s.name for s in grp.Background.wordnet_label] # or whatever
semantic_category = [s.name for s in grp.Background.semantic_category] # OR whatever
# TODO: n_vertices, n_faces, lens (interactive manual input?), constraints.
## Parent file
#pfile = act.Action.parent_file
# The above value (pfile) is ignored for now. Need to eventually implement some way to take the contents
        # of the current file (group/action/whatever) and save them (append them) to another specified file
# in the database. Currently NOT IMPLEMENTED.
thisfile = os.path.dirname(bpy.data.filepath) #if len(bpy.data.filepath)>0 else pfile
if thisfile=="":
# Require saving in db-appropriate location
raise NotImplementedError("Please save this file into %s before trying to save to database."%(os.path.join(dbpath, 'Background/')))
# Construct bvp Action
bg = cls.__new__(cls)
bg.__init__(name=grp.name,
fname=thisfile,
n_vertices=n_vertices,
n_faces=n_faces,
lens=lens,
semantic_category=grp.Background.semantic_category,
object_semantic_category=grp.Background.object_semantic_category,
sky_semantic_category=grp.Background.sky_semantic_category,
                    camera_constraints=None, # Save these? Unclear...
object_constraints=None, # same
obstacles=None, # same
dbi=dbi
)
return bg
# def test_background(self, frames=(1, 1), object_list=(), n_objects=0, edge_dist=0., object_overlap=0.50):
# """
# Tests object / camera constraints to see if they are working
# ** And shadows??
# Should be grouped with other testing functions, not here. Move.
# """
# Cam = Camera(frames=frames)
# Sky = Sky('*'+self.sky_semantic_category[0], Lib) # Choose a sky according to semantic category of BG ## RELIES ON ONLY ONE ENTRY FOR SKY SEMANTIC CAT! Should be most specific specifier...
# scn = Scene(0, BG=self, Cam=Cam, Sky=Sky, FrameRange=frames)
# if not object_list and not n_objects:
# object_list = [Object('*animal', Lib, size3D=None), Object('*vehicle', Lib, size3D=None), Object('*appliance', Lib, size3D=None)]
# n_objects = 0
# elif not object_list and n_objects:
# object_list = [Object(None, None, size3D=None) for x in range(n_objects)]
# scn.populate_scene(object_listist=object_list, ResetCam=True, RaiseError=True, nIter=100, edge_dist=edge_dist, object_overlap=object_overlap)
# if is_blender:
# RO = RenderOptions()
# scn.Create(RO)
# # Add spheres if there are blank objects:
# uv = bpy.ops.mesh.primitive_uv_sphere_add
# for o in range(n_objects):
# print('Sz of obj %d = %.2f'%(o, scn.Obj[o].size3D))
# ObSz = scn.Obj[o].size3D/2.
# pos = bmu.Vector(scn.Obj[o].pos3D) + bmu.Vector([0, 0, ObSz])
# uv(location=pos, size=ObSz)
def __repr__(self):
rstr = ('\nbvp Background {name}\n'
' File: {fname}\n'
' [{sem_cat}]\n'
' [{wn_lab}]\n'
' Size: {sz}, Lens: {lens}\n'
' Vertices: {verts}, Faces: {face}\n'
' Skies allowed: {skies}\n'
' Objects allowed: {obj}\n'
)
sem_cat = [] if self.semantic_category is None else self.semantic_category
wn_lab = [] if self.wordnet_label is None else self.wordnet_label
skies = [] if self.sky_semantic_category is None else self.sky_semantic_category
obj = [] if self.object_semantic_category is None else self.object_semantic_category
out = rstr.format(name=self.name, fname=self.fname,
sem_cat=', '.join(sem_cat),
wn_lab =', '.join(wn_lab),
sz=self.real_world_size, lens=self.lens,
verts=self.n_vertices, face=self.n_faces,
skies=', '.join(skies), obj=', '.join(obj))
return(out)
|
import os
import dj_database_url
### Basic config
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
# Until Sentry works on Py3, do errors the old-fashioned way.
ADMINS = []
# General project information
# These are available in the template as SITE_INFO.<title>
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
### Databases
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
### Locale settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
### Files (media and static)
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
### Authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
### Templates
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
]
### URLs, WSGI, middleware, etc.
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
### Apps
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'allauth',
'allauth.account',
'allauth.socialaccount',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.twitter',
# Tastypie needs the `users` app to be already loaded.
'tastypie',
]
# Fixtures
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
### Testing
SKIP_NETWORK_TESTS = True
### Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
### Development
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
### Comments
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
### Honeypot
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
### Blog Feed URL
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
### Registration mailing lists
MAILING_LIST_PSF_MEMBERS = "[email protected]"
### PEP Repo Location
PEP_REPO_PATH = ''
### Fastly ###
FASTLY_API_KEY = False # Set to Fastly API key in production to allow pages to
# be purged on save
# Jobs
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = '[email protected]'
### Pipeline
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
|
#===============================================================================
""" Optional settings """
#===============================================================================
# Enter the e-mail address below to receive real-time e-mail alerts
# e.g., '[email protected]'
MAIL = ''
# Enter the IP camera URL (e.g., url = 'http://191.138.0.100:8040/video')
url = ''
# ON/OFF for e-mail. Set True to enable the e-mail alert feature.
ALERT = False
# Threading ON/OFF
Thread = False
# Simple log to record the counting data
Log = False
# Auto-run / schedule the software to run at the desired time
Scheduler = False
# Automatically stop the software after a certain period / number of hours
Timer = False |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import List, Optional, Sequence, Union
import mmcv
import numpy as np
from mmcls.datasets.builder import DATASETS
from typing_extensions import Literal
from .base import BaseFewShotDataset
TRAIN_CLASSES = [
'n02074367', 'n03047690', 'n03854065', 'n02089867', 'n02105505',
'n01704323', 'n04604644', 'n03676483', 'n01558993', 'n07697537',
'n04509417', 'n02101006', 'n02165456', 'n13133613', 'n02747177',
'n02966193', 'n03924679', 'n04275548', 'n02113712', 'n03838899',
'n02091831', 'n03220513', 'n07747607', 'n03998194', 'n02108089',
'n09246464', 'n04251144', 'n02111277', 'n04435653', 'n03207743',
'n04389033', 'n03337140', 'n03908618', 'n02606052', 'n01770081',
'n01910747', 'n03062245', 'n02108551', 'n03017168', 'n04258138',
'n03888605', 'n04596742', 'n07584110', 'n02687172', 'n03476684',
'n04243546', 'n02795169', 'n02457408', 'n04515003', 'n06794110',
'n01532829', 'n01843383', 'n13054560', 'n04067472', 'n03347037',
'n04612504', 'n03400231', 'n01749939', 'n02823428', 'n04296562',
'n03527444', 'n04443257', 'n02108915', 'n02120079'
]
VAL_CLASSES = [
'n02138441', 'n02981792', 'n02174001', 'n03535780', 'n03770439',
'n03773504', 'n02950826', 'n03980874', 'n02114548', 'n03584254',
'n02091244', 'n03417042', 'n02971356', 'n01855672', 'n09256479',
'n03075370'
]
TEST_CLASSES = [
'n02110341', 'n01981276', 'n07613480', 'n02129165', 'n04418357',
'n02443484', 'n03127925', 'n01930112', 'n03272010', 'n03146219',
'n04146614', 'n03775546', 'n04522168', 'n02099601', 'n02871525',
'n02110063', 'n02219486', 'n02116738', 'n04149813', 'n03544143'
]
@DATASETS.register_module()
class MiniImageNetDataset(BaseFewShotDataset):
"""MiniImageNet dataset for few shot classification.
Args:
subset (str| list[str]): The classes of whole dataset are split into
three disjoint subset: train, val and test. If subset is a string,
only one subset data will be loaded. If subset is a list of
string, then all data of subset in list will be loaded.
Options: ['train', 'val', 'test']. Default: 'train'.
file_format (str): The file format of the image. Default: 'JPEG'
"""
resource = 'https://github.com/twitter/meta-learning-lstm/tree/master/data/miniImagenet' # noqa
TRAIN_CLASSES = TRAIN_CLASSES
VAL_CLASSES = VAL_CLASSES
TEST_CLASSES = TEST_CLASSES
def __init__(self,
subset: Literal['train', 'test', 'val'] = 'train',
file_format: str = 'JPEG',
*args,
**kwargs):
if isinstance(subset, str):
subset = [subset]
for subset_ in subset:
assert subset_ in ['train', 'test', 'val']
self.subset = subset
self.file_format = file_format
super().__init__(*args, **kwargs)
def get_classes(
self,
classes: Optional[Union[Sequence[str],
str]] = None) -> Sequence[str]:
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): Three types of input
will correspond to different processing logics:
- If `classes` is a tuple or list, it will override the
CLASSES predefined in the dataset.
                - If `classes` is None, the pre-defined CLASSES of the dataset
                  will be used.
- If `classes` is a string, it is the path of a classes file
that contains the name of all classes. Each line of the file
contains a single class name.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
class_names = []
for subset_ in self.subset:
if subset_ == 'train':
class_names += self.TRAIN_CLASSES
elif subset_ == 'val':
class_names += self.VAL_CLASSES
elif subset_ == 'test':
class_names += self.TEST_CLASSES
else:
raise ValueError(f'invalid subset {subset_} only '
f'support train, val or test.')
elif isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def load_annotations(self) -> List:
"""Load annotation according to the classes subset."""
img_file_list = {
class_name: sorted(
os.listdir(osp.join(self.data_prefix, 'images', class_name)),
key=lambda x: int(x.split('.')[0].split('_')[-1]))
for class_name in self.CLASSES
}
data_infos = []
for subset_ in self.subset:
ann_file = osp.join(self.data_prefix, f'{subset_}.csv')
assert osp.exists(ann_file), \
f'Please download ann_file through {self.resource}.'
with open(ann_file) as f:
for i, line in enumerate(f):
# skip file head
if i == 0:
continue
filename, class_name = line.strip().split(',')
filename = img_file_list[class_name][
int(filename.split('.')[0][-5:]) - 1]
gt_label = self.class_to_idx[class_name]
info = {
'img_prefix':
osp.join(self.data_prefix, 'images', class_name),
'img_info': {
'filename': filename
},
'gt_label':
np.array(gt_label, dtype=np.int64)
}
data_infos.append(info)
return data_infos
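# Hedged usage sketch (not part of the original dataset module): constructing
# the dataset for the train split. The directory layout, the empty pipeline,
# and the keyword names `data_prefix`/`pipeline` (assumed to be inherited from
# the mmcls-style BaseFewShotDataset) are illustrative assumptions; adjust to
# the actual base-class signature and your local data path.
if __name__ == '__main__':
    dataset = MiniImageNetDataset(
        subset='train',
        data_prefix='data/mini_imagenet',  # expects images/<class>/ and train.csv
        pipeline=[])
    print(len(dataset.CLASSES), 'classes,', len(dataset), 'images')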
|
from .command import cmd
from .files import *
from .flags import flag
from .config import local_config
import os.path
import json
import re
def input_older_than_output(rule_config):
return max_modify_time(rule_config['in']+[rule_config['__config_fn']]) > min_modify_time(rule_config['out'])
def needs_to_run(rule_config):
global ALWAYS_RUN
if rule_config['rule'] in ALWAYS_RUN or rule_config.get('always_run', False):
return True
return input_older_than_output(rule_config)
def validate_output(rule_config):
global VALIDATE_OUTPUT
if rule_config['rule'] not in VALIDATE_OUTPUT or flag('dry_run'):
return
if input_older_than_output(rule_config):
raise Exception('Not all outputs were generated successfully.')
def parse_path(rule_path):
m = re.match(r'([^:]*):([^:]+)$', rule_path)
if not m:
raise Exception('Invalid rule path: {}'.format(rule_path))
return m.groups()
def run_build_rule(rule_path, visited=None):
global RULES
# ensure that this rule isn't part of a cyclical chain
if visited is None:
visited = []
if rule_path in visited:
raise Exception('Rule {} already visited! Cyclical dep chain detected.'.format(rule_path))
visited = visited+[rule_path]
rule_dir, rule_name = parse_path(rule_path)
    # open the correct build rule list
config_fn = os.path.join(rule_dir, 'build.json')
with open(config_fn) as f:
try:
rule_list = json.load(f)
except Exception as e:
raise RuntimeError("Could not decode json at "+config_fn+'\n'+e.msg);
# select the rule config with the correct name
rule_configs = [rc for rc in rule_list if rc['name'] == rule_name]
if not rule_configs:
raise Exception('Could not find rule {} in config file {}'.format(rule_name, config_fn))
if len(rule_configs) > 1:
        raise Exception('Multiple instances of rule {} in config file {}'.format(rule_name, config_fn))
rule_config = rule_configs[0]
# do some sanity checking on the config before proceeding
for key in rule_config:
if key not in ['in', 'out', 'deps', 'name', 'rule', 'params', 'always_run']:
raise Exception('Unknown key in rule config {}, {}'.format(rule_path, key))
# build any necessary dependencies recursively first
rule_config['deps'] = rule_config.get('deps', [])
for dep in rule_config['deps']:
run_build_rule(dep, visited=visited)
# set defaults for easier handling by util functions
rule_config['in'] = deglob(rule_config.get('in', []))
rule_config['__config_fn'] = config_fn
rule_config['out'] = rule_config.get('out', [])
rule_config['rule'] = rule_config.get('rule', 'noop')
# if we don't need to run, proceed without building
if not needs_to_run(rule_config):
return
if not flag('quiet'):
print('{}'.format(rule_path))
# ensure all output file directories exist
for out_fp in rule_config['out']:
out_dir, out_fn = os.path.split(out_fp)
ensure_dir(out_dir)
# build this rule
if rule_config['rule'] not in RULES:
raise Exception('Unknown build rule {} for {}'.format(rule_config['rule'], rule_path))
RULES[rule_config['rule']](rule_config)
# verify that the output was generated
validate_output(rule_config)
def browserify(config):
assert len(config['in']) >= 1
assert len(config['out']) == 1
browserify = local_config()['browserify_bin']
in_fn = config['in'][0]
out_fn = config['out'][0]
cmd('{} {} -o {}'.format(browserify, in_fn, out_fn))
def js_test(config):
assert len(config['in']) >= 1
assert len(config['out']) == 1
flags = local_config().get('nodejs_flags', '')
cmd('nodejs {} {}'.format(flags, config['in'][0]))
cmd('touch {}'.format(config['out'][0]))
def noop(config):
pass
def stage(config):
assert len(config['out']) == 0
params = config.get('params', {})
out = params.get('out', None)
if out:
assert len(out) == len(config['in'])
sub_dir = params.get('subdir', '')
staging_dirs = params['stagingdirs']
dest_dirs = local_config()['staging_dirs']
if not staging_dirs:
staging_dirs = list(dest_dirs.keys())
for staging_dir in staging_dirs:
dest_dir = dest_dirs[staging_dir]
if sub_dir:
dest_dir = os.path.join(dest_dir, sub_dir)
ensure_dir(dest_dir)
for i, src_fn in enumerate(config['in']):
_, fn = os.path.split(src_fn)
if out:
dest_fn = os.path.join(dest_dir, out[i])
else:
dest_fn = os.path.join(dest_dir, fn)
if modify_time(dest_fn) < modify_time(src_fn):
cmd('cp {} {}'.format(src_fn, dest_fn))
def uglifyjs(config):
assert len(config['in']) >= 1
assert len(config['out']) == 1
uglifyjs = local_config()['uglifyjs_bin']
in_fn = config['in'][0]
out_fn = config['out'][0]
flags = config.get('params', {}).get('flags', '')
cmd('{} -c -m -o {} {} -- {} '.format(uglifyjs, out_fn, flags, in_fn))
RULES = {
'browserify': browserify,
'js_test': js_test,
'noop': noop,
'stage': stage,
'uglifyjs': uglifyjs,
}
ALWAYS_RUN = set([
'noop',
'stage',
])
VALIDATE_OUTPUT = set([
'browserify',
'uglifyjs',
])
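# Hedged illustration (not part of the original module): the shape of a
# build.json file that run_build_rule() above expects -- a JSON list of rule
# configs using only the keys the sanity check allows ('name', 'rule', 'in',
# 'out', 'deps', 'params', 'always_run'). The file names and the "web:js"
# dependency path below are made up for the example.
#
# [
#   {
#     "name": "bundle",
#     "rule": "browserify",
#     "in": ["src/main.js"],
#     "out": ["build/bundle.js"],
#     "deps": ["web:js"]
#   },
#   {
#     "name": "minify",
#     "rule": "uglifyjs",
#     "in": ["build/bundle.js"],
#     "out": ["build/bundle.min.js"],
#     "params": {"flags": "--source-map"}
#   }
# ]
#
# A rule is then built with run_build_rule('path/to/dir:minify'), which loads
# path/to/dir/build.json and runs the entry named "minify", building its deps
# first.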
|
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from pymclevel.box import Vector
from mcplatform import *
inputs = (
("Better Nuke", "label"),
("Creator: Colan Biemer", "label")
)
def draw_block(level, x, y, z, material):
level.setBlockAt(x, y, z, material.ID)
level.setBlockDataAt(x, y, z, 0)
def nuke(level, origin, size):
for x in range(min(origin.x, size.x), max(origin.x, size.x)):
for z in range(min(origin.z, size.z), max(origin.z, size.z)):
# loop from the top until we reach a material that is not empty
for y in reversed(range(min(origin.y, size.y), max(origin.y, size.y))):
block = level.blockAt(x,y,z)
if block == alphaMaterials.Water.ID or block == alphaMaterials.WaterActive.ID:
draw_block(level, x, y, z, alphaMaterials.Lava)
elif block == alphaMaterials.Wood.ID:
draw_block(level, x, y, z, alphaMaterials.CoalBlock)
elif block == alphaMaterials.Leaves.ID:
draw_block(level, x, y, z, alphaMaterials.Air)
elif block != alphaMaterials.Air.ID and \
block != alphaMaterials.CoalBlock.ID and \
block != alphaMaterials.Lava.ID:
draw_block(level, x, y, z, alphaMaterials.Obsidian)
break
def perform(level, box, options):
size = Vector(box.origin.x + box.size.x, box.origin.y + box.size.y, box.origin.z + box.size.z)
nuke(level, box.origin, size) |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import loading
from keystoneauth1 import session
from keystoneclient.v3 import client
from oslo_config import cfg
PASSWORD_PLUGIN = 'password'
TRUSTEE_CONF_GROUP = 'trustee'
KEYSTONE_AUTHTOKEN_GROUP = 'keystone_authtoken'
loading.register_auth_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
loading.register_session_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
loading.register_auth_conf_options(cfg.CONF, KEYSTONE_AUTHTOKEN_GROUP)
_ZAQAR_ENDPOINTS = {}
def _config_options():
trustee_opts = loading.get_auth_common_conf_options()
trustee_opts.extend(loading.get_auth_plugin_conf_options(PASSWORD_PLUGIN))
yield TRUSTEE_CONF_GROUP, trustee_opts
def get_trusted_token(trust_id):
"""Return a Keystone token using the given trust_id."""
auth_plugin = loading.load_auth_from_conf_options(
cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=trust_id)
trust_session = loading.load_session_from_conf_options(
cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin)
return trust_session.auth.get_access(trust_session).auth_token
def _get_admin_session(conf_group):
auth_plugin = loading.load_auth_from_conf_options(
cfg.CONF, conf_group)
return session.Session(
auth=auth_plugin, verify=getattr(cfg.CONF, conf_group).cafile)
def _get_user_client(auth_plugin):
sess = loading.load_session_from_conf_options(
cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin)
return client.Client(session=sess, interface='public')
def create_trust_id(auth_plugin, trustor_user_id, trustor_project_id, roles,
expires_at):
"""Create a trust with the given user for the configured trustee user."""
admin_session = _get_admin_session(TRUSTEE_CONF_GROUP)
trustee_user_id = admin_session.get_user_id()
client = _get_user_client(auth_plugin)
trust = client.trusts.create(trustor_user=trustor_user_id,
trustee_user=trustee_user_id,
project=trustor_project_id,
impersonation=True,
role_names=roles,
expires_at=expires_at)
return trust.id
def get_public_endpoint():
"""Get Zaqar's public endpoint from keystone"""
global _ZAQAR_ENDPOINTS
if _ZAQAR_ENDPOINTS:
return _ZAQAR_ENDPOINTS
zaqar_session = _get_admin_session(KEYSTONE_AUTHTOKEN_GROUP)
auth = zaqar_session.auth
if not auth:
return _ZAQAR_ENDPOINTS
catalogs = auth.get_auth_ref(zaqar_session).service_catalog
try:
_ZAQAR_ENDPOINTS['zaqar'] = catalogs.url_for(service_name='zaqar')
except Exception:
pass
try:
_ZAQAR_ENDPOINTS['zaqar-websocket'] = catalogs.url_for(
service_name='zaqar-websocket')
except Exception:
pass
return _ZAQAR_ENDPOINTS
|
from flask import Flask, jsonify, abort, render_template, request, redirect, url_for, g, send_from_directory
import subprocess
import os
import psutil
import socket
import signal
import RPi.GPIO as GPIO
import csv
import Adafruit_DHT
import threading
from collections import deque
import datetime
import telebot
import my_token
app = Flask(__name__)
GPIO_LIGHT = 18
GPIO_MOVEMENT = 17
def after_this_request(func):
if not hasattr(g, 'call_after_request'):
g.call_after_request = []
g.call_after_request.append(func)
return func
@app.after_request
def per_request_callbacks(response):
for func in getattr(g, 'call_after_request', ()):
response = func(response)
return response
# ===================================== WEB INTERFACE =====================================
# Root
@app.route('/test')
def index_test():
    return 'Hi, I am a Raspberry Pi'
# Favicon
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
# Index
@app.route('/')
def index():
return redirect('/panel/status')
# Control Panel
@app.route('/panel/<page>')
def index_page(page):
return render_template('index.html', page=page)
# Panel content
@app.route('/panel_get_content/<page>')
def get_content_for_control_panel(page):
return render_template(page + '.html')
# ======================================== RESTful API ========================================
# Root
@app.route('/api')
def index_api():
return 'Quantum Raspberry Server - API v0.3'
# =================== Bot management ==================
bots = [
{
'id': 0,
'name': 'Quantum Bot',
'path': '/home/pi/Autorun/Run/RaspberryUSBAndTelegram.exe',
'running': False,
'pid': -1,
'autorun': False
}
,
{
'id': 1,
'name': 'Shurya Chat Bot',
'path': '/home/pi/Autorun/Run/ShuryaChatBot.exe',
'running': False,
'pid': -1,
'autorun': True
}
,
{
'id': 2,
'name': 'Reminder Bot',
'path': '/home/pi/Autorun/Run/ReminderBot.exe',
'running': False,
'pid': -1,
'autorun': False
}
]
@app.route('/api/bots', methods=['GET'])
def api_bots_list():
return jsonify({'bots': bots})
@app.route('/api/bots/<int:bot_id>', methods=['GET'])
def api_bot(bot_id):
bot = list(filter(lambda t: t['id'] == bot_id, bots))
if len(bot)==0:
abort(404)
return jsonify({'bot': bot[0]})
@app.route('/api/bots', methods=['POST'])
def api_add_bot():
if not request.json or not 'path' in request.json:
abort(400)
bot = {
'id': bots[-1]['id'] + 1,
'name': request.json.get('name', ""),
'autorun': request.json.get('autorun', False),
'path': request.json['path'],
'pid': -1,
'running': False
}
bots.append(bot)
SaveBotsToFile()
return jsonify({'bot': bot}), 201
@app.route('/api/bots/<int:bot_id>', methods=['DELETE'])
def delete_bot(bot_id):
bot = list(filter(lambda t: t['id'] == bot_id, bots))
if len(bot) == 0:
abort(404)
bots.remove(bot[0])
SaveBotsToFile()
return jsonify({'result': True})
@app.route('/api/bots/run/<int:bot_id>', methods=['GET'])
def run_bot(bot_id):
bot = list(filter(lambda t: t['id'] == bot_id, bots))
if len(bot) == 0:
abort(404)
if bot[0]['running']==True:
return jsonify({'result': False})
else:
bot[0]['running'] = True
_run_bot(bot[0])
return jsonify({'result': True})
def _run_bot(bot):
with open(bot['path'] + "stdout.txt","wb") as out, open(bot['path'] + "stderr.txt","wb") as err:
subproc = subprocess.Popen(["mono", bot['path']], stdout=out, stderr=err)
bot['pid'] = subproc.pid
@app.route('/api/bots/stop/<int:bot_id>', methods=['GET'])
def stop_bot(bot_id):
bot = list(filter(lambda t: t['id'] == bot_id, bots))
if len(bot) == 0:
abort(404)
if bot[0]['running']==False:
return jsonify({'result': False})
else:
os.system('sudo kill ' + str(bot[0]['pid']))
#os.kill(bot[0]['pid'], signal.SIGTERM)
bot[0]['running']=False
return jsonify({'result': True})
# =================== Matrix on /dev/tty1 ==================
# Run the "matrix" effect on the attached screen
@app.route('/api/matrix/<i>')
def matrix(i):
if i=='1':
os.system('sudo cmatrix 1>/dev/tty1 &')
elif i=='0':
os.system('sudo killall cmatrix')
os.system('sudo clear > /dev/tty1')
else:
abort(400)
return jsonify({'result': True})
# =================== Shutdown and reboot ==================
# Shutdown
@app.route('/api/shutdown')
def shutdown():
@after_this_request
def closescrypt(response):
exit()
GPIO.remove_event_detect(GPIO_MOVEMENT)
GPIO.cleanup()
os.system('sudo shutdown -h now &')
    return jsonify({'result': True}) #'Shutting down Raspberry Pi..'
# Reboot
@app.route('/api/reboot')
def reboot():
@after_this_request
def closescrypt(response):
exit()
GPIO.remove_event_detect(GPIO_MOVEMENT)
GPIO.cleanup()
os.system('sudo reboot &')
return jsonify({'result': True}) #'Rebooting Raspberry Pi..'
# =========================== GPIO ===========================
# GPIO.OUT = 0; GPIO.IN = 1
# GPIO.PUD_OFF = 20; GPIO.PUD_DOWN = 21; GPIO.PUD_UP = 22
# GPIO Setup
@app.route('/api/gpio/<int:channel>', methods=['POST'])
def gpiosetup(channel):
if not request.json or not 'Direction' in request.json or not 'Resistor' in request.json or not 'Value' in request.json:
abort(400)
dir = request.json['Direction']
pull = request.json['Resistor']
val = request.json['Value']
if (dir == -1):
abort(400)
if (dir == GPIO.OUT):
if (pull != GPIO.PUD_OFF and pull != -1):
abort(400)
if (pull == -1):
GPIO.setup(channel, dir)
else:
GPIO.setup(channel, dir, pull)
if (dir == GPIO.OUT and val != -1):
GPIO.output(channel, val)
opened_pins.add(channel)
result = {
'Channel': channel,
'Direction': dir,
'Resistor': pull,
'Value': GPIO.input(channel)
}
return jsonify({'GPIO': result})
# GPIO Output
@app.route('/api/gpio/<int:channel>/<int:value>', methods=['GET'])
def gpiooutput(channel, value):
if (value != 0 and value != 1):
abort(400)
try:
GPIO.output(channel, value)
return jsonify({'result': True})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
# GPIO Input
@app.route('/api/gpio/<int:channel>', methods=['GET'])
def gpioinput(channel):
try:
value = GPIO.input(channel)
return jsonify({'result': True, 'value': value})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
# GPIO Quick Setup
@app.route('/api/gpio/setup/<int:channel>/<int:dir>', methods=['GET'])
def gpioqsetup(channel, dir):
try:
GPIO.setup(channel, dir)
opened_pins.add(channel)
return jsonify({'result': True})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
# =========================== Stats ===========================
opened_pins = set()
dhtdata = {'temp': 0, 'hum': 0}
@app.route('/api/stats', methods=['GET'])
def stats():
vm = psutil.virtual_memory()
cpuusg = str(psutil.cpu_percent(interval=1))
cputemp = getCPUtemperature()
memusd = vm.percent
memtotal = vm.total
cpu = {
'usage': cpuusg,
'temperature': cputemp
}
mem = {
'used': memusd,
'total': memtotal
}
gpio = {pin: GPIO.input(pin) for pin in opened_pins}
stat = {
'external': dhtdata,
'memory': mem,
'cpu': cpu,
'gpio': gpio,
}
return jsonify({'stats': stat})
#=========================== High level control ======================
light_settings = {
'time': False,
'time_on': datetime.time(hour=22, minute=30, second=0),
'time_off': datetime.time(hour=2, minute=0, second=0),
'movement': True,
'movement_delay': datetime.timedelta(hours=0, minutes=5),
'movement_from': datetime.time(hour=21, minute=0, second=0),
'movement_to': datetime.time(hour=3, minute=30, second=0),
'movement_turn_off_in': None,
'value': 0 #0 - auto, -1 - off, 1 - on
}
movements = deque()
def halfminutetimer():
if (light_settings['time']):
is_between = time_between(light_settings['time_on'], light_settings['time_off'], datetime.datetime.now().time())
is_light = GPIO.input(GPIO_LIGHT) == GPIO.HIGH
if (not is_light and is_between):
GPIO.output(GPIO_LIGHT, True)
elif (is_light and not is_between):
GPIO.output(GPIO_LIGHT, False)
if (light_settings['movement_turn_off_in'] != None):
if datetime.datetime.now() > light_settings['movement_turn_off_in']:
light_settings['movement_turn_off_in'] = None
GPIO.output(GPIO_LIGHT, False)
threading.Timer(30.0, halfminutetimer).start()
@app.route('/api/autolight', methods=['GET'])
def get_autolight():
return jsonify({'settings': light_settings})
@app.route('/api/light', methods=['GET'])
def get_light():
try:
value = GPIO.input(GPIO_LIGHT)
return jsonify({'result': True, 'value': value})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
@app.route('/api/light/<int:value>', methods=['GET'])
def set_light(value):
try:
GPIO.output(GPIO_LIGHT, value)
return jsonify({'result': True})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
@app.route('/api/movement/now', methods=['GET'])
def get_now_movement():
try:
value = GPIO.input(GPIO_MOVEMENT)
return jsonify({'result': True, 'value': value})
except Exception as e:
return jsonify({'result': False, 'exception': str(e)})
@app.route('/api/movement', methods=['GET'])
def get_movements():
return jsonify({'movements': list(movements)})
def init_high_level_control():
GPIO.setup(GPIO_LIGHT, GPIO.OUT)
GPIO.setup(GPIO_MOVEMENT, GPIO.IN)
GPIO.add_event_detect(GPIO_MOVEMENT, GPIO.BOTH, bouncetime=350, callback=movement_changed_callback)
# Could not get a global variable to work here, so this one-item dict is used as a workaround
movementstart = {'value': None}
def movement_changed_callback(channel):
if (GPIO.input(channel) == GPIO.HIGH):
movement_rising_callback(channel)
else:
movement_falling_callback(channel)
def movement_rising_callback(channel):
movementstart['value'] = datetime.datetime.now()
if (light_settings['movement'] and time_between(light_settings['movement_from'], light_settings['movement_to'], datetime.datetime.now().time())):
GPIO.output(GPIO_LIGHT, GPIO.HIGH)
def movement_falling_callback(channel):
movements.append({'start': movementstart['value'].isoformat(' '), 'end': datetime.datetime.now().isoformat(' ')})
if (len(movements) > 64):
movements.popleft()
if (light_settings['movement'] and time_between(light_settings['movement_from'], light_settings['movement_to'], datetime.datetime.now().time())):
light_settings['movement_turn_off_in'] = datetime.datetime.now() + light_settings['movement_delay']
def time_between(From, To, current):
if (From < To):
return current > From and current < To
else:
return current > From or current < To
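# Example of the wrap-around handling above, using the default movement window (21:00 -> 03:30):
#   time_between(datetime.time(21, 0), datetime.time(3, 30), datetime.time(22, 0)) -> True
#   time_between(datetime.time(21, 0), datetime.time(3, 30), datetime.time(2, 0))  -> True
#   time_between(datetime.time(21, 0), datetime.time(3, 30), datetime.time(12, 0)) -> False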
# ====================== Light control via Telebot =====================
bot = telebot.TeleBot(my_token.TELEBOT_TOKEN)
@bot.message_handler(commands=['light_on']) #content_types=["text"]
def bot_light_on(message):
GPIO.output(GPIO_LIGHT, 1)
bot.send_message(message.chat.id, 'Свет включён')
@bot.message_handler(commands=['light_off'])
def bot_light_off(message):
GPIO.output(GPIO_LIGHT, 0)
bot.send_message(message.chat.id, 'Свет выключен')
# ====================== Unfinished below ============================
# Start the Spigot server
@app.route('/api/minecraft/spigot')
def spigot():
os.system('sudo java -Xms384M -Xmx740M -jar /home/minecraft/spigot-1.11.2.jar nogui')
return 'Done'
# Start the CraftBukkit server
@app.route('/api/minecraft/bukkit')
def bukkit():
os.system('sudo java -Xms384M -Xmx740M -jar /home/minecraft/craftbukkit-1.11.2.jar nogui')
return 'Done'
# Start a vanilla server with Forge
@app.route('/api/minecraft/forge')
def forge():
os.system('sudo java -Xms384M -Xmx740M -jar /home/minecraft/forge-1.11.2-13.20.0.2228-universal.jar nogui')
return 'Done'
# Restart the bots
@app.route('/api/restart_bots')
def restartbots():
os.system('sudo killall mono')
os.system('/home/pi/Autorun/Autorun.py')
return 'Done'
# Stop the bots
@app.route('/api/stop_bots')
def stopbots():
os.system('sudo killall mono')
return 'Done'
# RAM statistics
@app.route('/api/memory/<val>')
def meminfo(val):
p = psutil.virtual_memory()
if val=='free':
return str(100-p.percent)
elif val=='total':
return str(p.total)
elif val=='used':
return str(p.percent)
else:
return '?'
# Current CPU temperature
@app.route('/api/cpu_temp')
def cputemp():
return getCPUtemperature()
# CPU usage statistics
@app.route('/api/cpu')
def cpuusage():
return str(psutil.cpu_percent(interval=1))
# Start the motion service
@app.route('/api/motion/start')
def startmotion():
os.system('sudo service motion start &')
return redirect(request.url[:request.url.index('5000')] + '8081/', code=302)
# Stop the motion service
@app.route('/api/motion/stop')
def stopmotion():
os.system('sudo service motion stop')
return jsonify({'result': True})
# Check the current state of the motion service
@app.route('/api/motion')
def statusmotion():
    try:
        output = subprocess.check_output(["service", "motion", "status"], stderr=subprocess.STDOUT).decode()
    except subprocess.CalledProcessError as e:
        # "service ... status" exits non-zero when the service is not running
        output = e.output.decode()
    if 'inactive (dead)' in output:
        return 'False'
    else:
        return 'True'
# Start streaming to Picarto
@app.route('/api/stream/picarto')
def picarto_stream():
os.system('ffmpeg -f v4l2 -framerate 20 -video_size 640x480 -i /dev/video0 -c:v libx264 -b:v 500k -maxrate 500k -bufsize 500k -an -f flv rtmp://live.us.picarto.tv/golive/...')
return 'Stream started'
# Start streaming to YouTube
@app.route('/api/stream/youtube')
def youtube_stream():
os.system('ffmpeg -ar 44100 -ac 2 -acodec pcm_s16le -f s16le -ac 2 -i /dev/zero -f v4l2 -c:a aac -framerate 5 -video_size 640x480 -i /dev/video0 -c:v libx264 -b:v 200k -maxrate 200k -bufsize 200k -vcodec h264 -g 60 -strict experimental -f flv rtmp://a.rtmp.youtube.com/live2/...')
return 'Stream started'
# System update (apt-get update)
@app.route('/api/update')
def update():
    os.system('sudo killall cmatrix')
    os.system('sudo apt-get update > /dev/tty1 &')
    return 'Updating..'
# System upgrade (apt-get upgrade)
@app.route('/api/upgrade')
def upgrade():
    os.system('sudo killall cmatrix')
    os.system('sudo apt-get upgrade > /dev/tty1 &')
    return 'Upgrading..'
def getCPUtemperature():
res = os.popen('vcgencmd measure_temp').readline()
return(res.replace("temp=","").replace("'C\n",""))
def LoadBotsFromFile():
    global bots
    if (os.path.isfile("bots_data.txt")):
        with open("bots_data.txt", "r") as f:
            reader = csv.reader(f, delimiter="|")
            # rebuild the bot dicts from the pipe-separated rows written by SaveBotsToFile()
            bots = [{'id': int(row[0]), 'name': row[1], 'autorun': row[2] == 'True',
                     'path': row[3], 'pid': int(row[4]), 'running': row[5] == 'True'}
                    for row in reader]
def SaveBotsToFile():
    with open("bots_data.txt", "w") as f:
        writer = csv.writer(f, delimiter="|")
        # one pipe-separated row per bot
        writer.writerows([[b['id'], b['name'], b['autorun'], b['path'], b['pid'], b['running']]
                          for b in bots])
def updatedht():
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)
dhtdata['hum'] = humidity
dhtdata['temp'] = temperature
threading.Timer(15.0, updatedht).start()
# ====================================== Main ========================================
if __name__ == '__main__':
LoadBotsFromFile()
for b in bots:
if (b['running']):
            b['running'] = False
if (b['autorun']):
b['running'] = True
_run_bot(b)
GPIO.setmode(GPIO.BCM)
init_high_level_control()
updatedht()
halfminutetimer()
    # bot.polling() blocks, so run it on a background thread and keep Flask in the main thread
    threading.Thread(target=bot.polling, kwargs={'none_stop': True}, daemon=True).start()
    app.run(debug=True, host='0.0.0.0', port=5000)  # port for testing |
from pyautodiff.dual import Dual as Dual
import numpy as np
def sin(x):
"""Calculate sine of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the sine value
"""
if (isinstance(x,Dual)):
x.der = np.cos(x.val)*x.der
x.val = np.sin(x.val)
return x
else:
return np.sin(x)
def cos(x):
"""Calculate cosine of the input
    Keyword arguments:
x -- a real number or a dual number
Return:
the cosine value
"""
if (isinstance(x,Dual)):
x.der = -1 * np.sin(x.val)*x.der
x.val = np.cos(x.val)
return x
else:
return np.cos(x)
def tan(x):
"""Calculate tangent of the input
Keyword arguments:
x -- a real number or a dual number
Return:
    the tangent value
"""
if (isinstance(x,Dual)):
x.der = 1/np.cos(x.val)**2*x.der
x.val = np.tan(x.val)
return x
else:
return np.tan(x)
def log(x):
"""Calculate the natural log of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the natural log value
"""
if (isinstance(x,Dual)):
x.der = (1/x.val)*x.der
x.val = np.log(x.val)
return x
else:
return np.log(x)
def log2(x):
"""Calculate the log2 of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the log2 value
"""
if (isinstance(x,Dual)):
x.der = (1/(x.val*np.log(2)))*x.der
x.val = np.log2(x.val)
return x
else:
return np.log2(x)
def log10(x):
"""Calculate the log10 of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the log10 value
"""
if (isinstance(x,Dual)):
x.der = (1/(x.val*np.log(10)))*x.der
x.val = np.log10(x.val)
return x
else:
return np.log10(x)
def logb(x, base):
"""Calculate the log of the input with bases b
Keyword arguments:
x -- a real number or a dual number
Return:
the log value with base b
"""
if (isinstance(x,Dual)):
x.der = (1/x.val/np.log(base)) * x.der
x.val = np.log(x.val) / np.log(base)
return x
else:
return np.log(x) / np.log(base)
def exp(x):
"""Calculate the exponential of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the exponential value
"""
if (isinstance(x,Dual)):
x.der = np.exp(x.val) * x.der
x.val = np.exp(x.val)
return x
else:
return np.exp(x)
def power(x1, x2):
"""Calculate the exponential of the input x1 with base x2
Keyword arguments:
x1 -- a real number or a dual number
x2 -- a real number or a dual number
Return:
the exponential value x1 with base x2
"""
if (isinstance(x1,Dual)) and (isinstance(x2,Dual)):
# da^u/dx = ln(a) a^u du/dx
factor = x1.val ** (x2.val -1)
sum_1 = x2.val * x1.der
sum_2 = x1.val * np.log(x1.val) * x2.der
temp = factor * (sum_1 + sum_2)
return Dual(x1.val ** x2.val, temp)
elif (isinstance(x1,Dual)):
# du^n/dx = n * u^(n-1) * du/dx
temp = x2 * x1.val ** (x2-1) * x1.der
return Dual(x1.val ** x2, temp)
elif (isinstance(x2,Dual)):
# da^u/dx = ln(a) a^u du/dx
temp = np.log(x2.val) * x2.val ** x1 * x2.der
return Dual(x1 ** x2.val, temp)
else:
return np.power(x1,x2)
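# Worked example for power() above (a sketch; Dual(value, derivative) as used in this module):
#   power(Dual(2.0, 1.0), 3) -> Dual(8.0, 12.0), matching d/dx x**3 = 3*x**2 = 12 at x = 2.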
def sqrt(x):
"""Calculate the square root of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the square root value
"""
if (isinstance(x,Dual)):
x.der = 0.5/np.sqrt(x.val) * x.der
x.val = np.sqrt(x.val)
return x
else:
return np.sqrt(x)
def arcsin(x):
"""Calculate the arcsin of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the arcsin value
"""
if (isinstance(x,Dual)):
x.der = 1 / np.sqrt(1 - x.val **2) * x.der
x.val = np.arcsin(x.val)
return x
else:
return np.arcsin(x)
def arccos(x):
"""Calculate the arccos of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the arccos value
"""
if (isinstance(x,Dual)):
x.der = -1 / np.sqrt(1 - x.val**2) * x.der
x.val = np.arccos(x.val)
return x
else:
return np.arccos(x)
def arctan(x):
"""Calculate the arccos of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the arctan value
"""
if (isinstance(x,Dual)):
x.der = 1 / (1 + x.val**2) * x.der
x.val = np.arctan(x.val)
return x
else:
return np.arctan(x)
def sinh(x):
"""Calculate the sinh of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the sinh value
"""
if (isinstance(x,Dual)):
x.der = np.cosh(x.val) * x.der
x.val = np.sinh(x.val)
return x
else:
return np.sinh(x)
def cosh(x):
"""Calculate the cosh of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the cosh value
"""
if (isinstance(x,Dual)):
x.der = np.sinh(x.val) * x.der
x.val = np.cosh(x.val)
return x
else:
return np.cosh(x)
def tanh(x):
"""Calculate the tanh of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the tanh value
"""
if (isinstance(x,Dual)):
        x.der = x.der / np.cosh(x.val)**2
x.val = np.tanh(x.val)
return x
else:
return np.tanh(x)
def help_logistic(x, L=1, k=1, x0=1):
return L/(1 + np.exp(-k*(x-x0)))
def logistic(x, L=1, k=1, x0=1):
"""Calculate the logistic of the input
Keyword arguments:
x -- a real number or a dual number
Return:
the logistic value
"""
if (isinstance(x,Dual)):
temp = help_logistic(x.val,L,k,x0)
x.der = temp * (1 - temp ) * x.der
x.val = temp
return x
else:
return help_logistic(x, L, k, x0)
# Example: sum([5*x, 3, 4]) for a dual x gives der = 5*x.der and val = 5*x.val + 3 + 4
def sum(xs):
"""Calculate the sum of the input
Keyword arguments:
xs -- a real value list
Return:
the sum of the array
"""
cur_val = 0
cur_der = 0
is_dual = False
# print('xs')
for x in xs:
# print('val: ',x.val)
if (isinstance(x,Dual)):
is_dual = True
cur_der += x.der
cur_val += x.val
else:
cur_val += x
if is_dual:
return Dual(cur_val,cur_der)
else:
return cur_val
def abs(x):
"""Calculate the sum of the input
Keyword arguments:
x -- a real value
Return:
the absolute value of x
"""
if (isinstance(x,Dual)):
x.der = x.val/np.abs(x.val)
x.val = np.abs(x.val)
return x
else:
return np.abs(x)
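if __name__ == "__main__":
    # Minimal forward-mode AD sketch (assumes Dual(value, derivative), as constructed above):
    x0 = Dual(0.5, 1.0)      # seed: value 0.5, derivative dx/dx = 1
    y = sin(x0)              # propagates value and derivative through sin
    print(y.val, y.der)      # expected: np.sin(0.5), np.cos(0.5)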
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
    # images are read as RGB via matplotlib, hence COLOR_RGB2GRAY
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def drawLine(img, x, y, color=[255, 0, 0], thickness=10):
"""
    Line fitting
    Fits a line to the points [`x`, `y`] and draws it on the image `img` using `color` and `thickness`.
"""
if len(x) == 0:
return
lineParameters = np.polyfit(x, y, 1)
m = lineParameters[0]
b = lineParameters[1]
maxY = img.shape[0]
maxX = img.shape[1]
y1 = maxY
x1 = int((y1 - b)/m)
y2 = int((maxY/2)) + 60
x2 = int((y2 - b)/m)
    cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
leftPointsX = []
leftPointsY = []
rightPointsX = []
rightPointsY = []
for line in lines:
for x1,y1,x2,y2 in line:
            if x2 == x1:
                continue  # skip vertical segments to avoid division by zero
            m = (y2 - y1) / (x2 - x1)
            if m < 0:
leftPointsX.append(x1)
leftPointsY.append(y1)
leftPointsX.append(x2)
leftPointsY.append(y2)
else:
rightPointsX.append(x1)
rightPointsY.append(y1)
rightPointsX.append(x2)
rightPointsY.append(y2)
drawLine(img, leftPointsX, leftPointsY, color, thickness)
drawLine(img, rightPointsX, rightPointsY, color, thickness)
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def process_image_pipeline(image):
#1 grayscale the image
gray = grayscale(image)
#2 Apply Gaussian smoothing
    blur_gray = gaussian_blur(gray, kernel_size = 5)
#3 Apply Canny in order to perform the edge detection
    edges = canny(blur_gray, low_threshold = 50, high_threshold = 150)  # typical starting thresholds; tune for your images
#4 This time we are defining a four sided polygon to mask
imshape = image.shape
    # vertices that define our region of interest
    # (example trapezoid covering the lower part of the frame; adjust to your camera setup)
    vertices = np.array([[(0, imshape[0]), (int(imshape[1]*0.45), int(imshape[0]*0.6)), (int(imshape[1]*0.55), int(imshape[0]*0.6)), (imshape[1], imshape[0])]], dtype=np.int32)
masked_edges=region_of_interest(edges, vertices)
#5 Define the Hough transform parameters (based on guess and looking which was the output :p)
# Make a blank the same size as our image to draw on
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 15        # minimum number of votes (intersections in Hough grid cell); placeholder, tune as needed
    min_line_length = 40  # minimum number of pixels making up a line; placeholder, tune as needed
    max_line_gap = 20     # maximum gap in pixels between connectable line segments; placeholder, tune as needed
line_image = np.copy(image) # creating a blank to draw lines on
#6 Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
# Define the Hough transform parameters
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((edges, edges, edges))
    # Overlay the detected lines on the original image
image_wlines = cv2.addWeighted(lines, 0.8, line_image, 1, 0)
return image_wlines
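# Example driver (a sketch; the "test_images" / "test_images_output" folder names are
# placeholders, not part of the pipeline above):
if __name__ == "__main__":
    create_directory("test_images_output")
    for name in os.listdir("test_images"):
        image = mpimg.imread(os.path.join("test_images", name))
        plt.imsave(os.path.join("test_images_output", name), process_image_pipeline(image))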
|
# Filename: Controller.py
# Written by: Niranjan Bhujel
# Description: Contains controller such as LQR, MPC, etc.
from math import inf, isinf
from pynlcontrol.BasicUtils import Integrate, nlp2GGN, casadi2List, directSum, __SXname__
import casadi as ca
def LQR(A, B, C, D, Q, R, Qt, Ts, horizon=inf, reftrack=False, NMAX=1000, tol=1e-5, Integrator='rk4'):
"""
Function to implement discrete-time linear quadratic regulator (LQR).
Parameters
----------
A : numpy.2darray or casadi.SX.array
Continuous time state matrix
B : numpy.2darray or casadi.SX.array
Continuous time input matrix
C : numpy.2darray or casadi.SX.array
Continuous time output matrix
D : numpy.2darray or casadi.SX.array
Continuous time output matrix coefficient of input
Q : numpy.2darray or casadi.SX.array
Weight to penalize control error
R : numpy.2darray or casadi.SX.array
Weight to penalize control effort
Qt : numpy.2darray or casadi.SX.array
Weight of terminal cost to penalize control error
Ts : float
Sample time of controller
horizon : int, optional
Horizon length of LQR. Defaults to inf for infinite horizon LQR problem.
reftrack : bool, optional
Whether problem is reference tracking. Defaults to False.
NMAX : int, optional
        Maximum iteration for solving matrix Riccati equation. Defaults to 1000.
tol : float, optional
        Tolerance for solution of matrix Riccati equation. Defaults to 1e-5.
Integrator : str, optional
Integrator to be used for discretization. Defaults to 'rk4'.
Returns
-------
tuple
Tuple of Input, Output, Input name and Output name. Inputs are x or [x, r] (depending upon the problem is reference tracking or not) and output are u and K.
Input and output are casadi symbolics (`casadi.SX`).
These inputs are and outputs can be mapped using `casadi.Function` which can further be code generated.
Example
-------
>>> from pynlcontrol import Controller, BasicUtils
>>> import casadi as ca
>>> Q11 = ca.SX.sym('Q11')
>>> Q22 = ca.SX.sym('Q22')
>>> Q33 = ca.SX.sym('Q33')
>>> Q = BasicUtils.directSum([Q11, Q22, Q33])
>>> R11 = ca.SX.sym('R11')
>>> R22 = ca.SX.sym('R22')
>>> R = BasicUtils.directSum([R11, R22])
>>> A = ca.SX([[-0.4,0.1,-2],[0,-0.3,4],[1,0,0]])
>>> B = ca.SX([[1,1],[0,1],[1,0]])
>>> C = ca.SX([[1, 0, 0], [0, 1, 0]])
>>> D = ca.SX([[0, 0], [0, 0]])
>>> In, Out, InName, OutName = Controller.LQR(A=A, B=B, C=C, D=D, Q=Q, R=R, Qt=Q, Ts=0.1, horizon=10, reftrack=True)
>>> lqr_func = ca.Function('lqr_func', In + [Q11, Q22, Q33, R11, R22], Out, InName + ['Q11', 'Q22', 'Q33', 'R11', 'R22'], OutName)
>>> BasicUtils.Gen_Code(lqr_func, 'lqr_code', printhelp=True)
x(3, 1), ref(2, 1), Q11(1, 1), Q22(1, 1), Q33(1, 1), R11(1, 1), R22(1, 1) -> u(2, 1), K(6, 1)
lqr_code.c
lqr_code_Call.c
#include "lqr_code.h"
#include "lqr_code_Call.h"
lqr_code_Call_Func(x, ref, Q11, Q22, Q33, R11, R22, u, K);
Running above code generates C-codes for LQR implementation. Implementation using Simulink can be found in example folder.
"""
nX = A.shape[0]
nU = B.shape[1]
nY = C.shape[0]
assert D.shape[1] == B.shape[1], "Inconsistent shape of B and D"
assert nY == nU, "Number of control inputs and controlled outputs should be same."
assert nX == Q.shape[0] == Q.shape[1], "Error in size of Q"
assert nX == Qt.shape[0] == Qt.shape[1], "Error in size of Qt"
assert nY == R.shape[0] == R.shape[1], "Error in size of R"
x = ca.SX.sym('x', nX, 1)
u = ca.SX.sym('u', nU, 1)
def Fc(x, u):
return A @ x + B @ u
xk1 = Integrate(Fc, Integrator, Ts, x, u)
Ad = ca.jacobian(xk1, x)
Bd = ca.jacobian(xk1, u)
P = Qt
MAXITER = NMAX if isinf(horizon) else horizon
for _ in range(MAXITER):
P = Ad.T @ P @ Ad - (Ad.T @ P @ Bd) @ ca.inv(R +
Bd.T @ P @ Bd) @ (Bd.T @ P @ Ad) + Q
K = ca.inv(R + Bd.T @ P @ Bd) @ (Bd.T @ P @ Ad)
u = -K @ x
if reftrack:
r = ca.SX.sym('r', nY, 1)
tmp = ca.vertcat(
ca.horzcat(A, B),
ca.horzcat(C, D)
)
Nxu = ca.inv(tmp) @ ca.vertcat(ca.GenSX_zeros(nX, nY), ca.SX_eye(nY))
Nx = Nxu[0:nX, :]
Nu = Nxu[nX:, :]
u += (Nu + K@Nx)@r
return [x, r], [u, K.T.reshape((-1, 1))], ['x', 'ref'], ['u', 'K']
else:
return [x], [u, K.T.reshape((-1, 1)), P], ['x'], ['u', 'K', 'P']
def simpleMPC(nX, nU, nY, nP, Fc, Hc, Gk, Gn, N, Ts, uLow, uUpp, GGN=False, solver='qrqp', Integrator='rk4', Options=None):
"""
Function to generate simple MPC code using `qrqp` solver. For use with other advanced solver, see `MPC` class.
Parameters
----------
nX : int
Number of state variables.
nU : int
Number of input variables
nY : int
Number of control output variables
nP : int
Number of external parameters
Fc : function
Function that returns right hand side of state equation.
Hc : function
Function that returns right hand side of control output equation.
Gk : function
Function that returns general constraints. Input arguments should be x, u, p and return as tuple (g, lbg, ubg)
Gn : function
Function that returns general terminal constraints. Input arguments should be x, p and return as tuple (g, lbg, ubg)
N : float or casadi.SX array or numpy.2darray
Horizon length
Ts : float
Sample time
uLow : list or float
Lower limit on control input
    uUpp : list or float
Upper limit on control input
GGN : bool, optional
Whether generalized Gauss Newton should be used. Use only for nonlinear problem. by default False
Integrator : str, optional
Integration method. See `BasicUtils.Integrate()` function. by default 'rk4'
    Options : dict, optional
Option for `qrqp` solver. Defaults to None.
Returns
-------
tuple:
Tuple of Input, Output, Input name and Output name. Input and output are list of casadi symbolics (`casadi.SX`).
Inputs are initial guess, current state, reference, corresponding weights
Outputs value of all decision variables, calculated control signal and cost function
Example
-------
>>> import casadi as ca
>>> from pynlcontrol import BasicUtils, Controller
    >>> def Fc(x, u):
            A = ca.SX(
                [
                    [-0.4, 0.1, -2],
                    [0, -0.3, 4],
                    [1, 0, 0]
                ]
            )
            B = ca.SX(
                [
                    [1, 1],
                    [0, 1],
                    [1, 0]
                ]
            )
            return A @ x + B @ u
>>> def Hc(x):
return ca.vertcat(x[0], x[1])
>>> In, Out, InName, OutName = Controller.simpleMPC(3, 2, 2, 0, Fc, Hc, None, None, 25, 0.1, [-10, 0], [10, 3], GGN=False)
-------------------------------------------
This is casadi::QRQP
Number of variables: 128
Number of constraints: 78
Number of nonzeros in H: 100
Number of nonzeros in A: 453
Number of nonzeros in KKT: 1112
Number of nonzeros in QR(R): 1728
-------------------------------------------
This is casadi::Sqpmethod.
Using exact Hessian
Number of variables: 128
Number of constraints: 78
Number of nonzeros in constraint Jacobian: 453
Number of nonzeros in Lagrangian Hessian: 100
>>> MPC_func = ca.Function('MPC_func', In, Out, InName, OutName)
>>> BasicUtils.Gen_Code(MPC_func, 'MPC_Code', printhelp=True, optim=True)
zGuess(128, 1), x0(3, 1), xref(2, 1), Q(2, 2), Qt(2, 2), R(2, 2) -> zOut(128, 1), uCalc(2, 1), Cost(1, 1)
MPC_Code.c
MPC_Code_Call.c
#include "MPC_Code.h"
#include "MPC_Code_Call.h"
MPC_Code_Call_Func(zGuess, x0, xref, Q, Qt, R, zOut, uCalc, Cost);
"""
if Options is None:
Options = {}
X = ca.SX.sym('X', nX, N+1)
U = ca.SX.sym('U', nU, N)
P = ca.SX.sym('P', nP, 1)
X0 = ca.SX.sym('X0', nX, 1)
Xref = ca.SX.sym('Xref', nY, 1)
Q = ca.SX.sym('Q', nY, nY)
Qt = ca.SX.sym('Qt', nY, nY)
R = ca.SX.sym('R', nU, nU)
J = ca.vertcat()
for k in range(N):
J = ca.vertcat(
J,
Q @ (Hc(X[:,k], P) - Xref),
R @ U[:,k]
)
J = ca.vertcat(
J,
Qt @ (Hc(X[:,N], P) - Xref)
)
g = ca.vertcat()
lbg = ca.vertcat()
ubg = ca.vertcat()
# Initial state constraints
g = ca.vertcat(
g,
X[:, 0] - X0,
)
lbg = ca.vertcat(
lbg,
ca.GenSX_zeros(nX, 1),
)
ubg = ca.vertcat(
ubg,
ca.GenSX_zeros(nX, 1),
)
# State equations constraints
for k in range(N):
        Xnext = Integrate(Fc, Integrator, Ts, X[:, k], U[:, k], P) if nP > 0 else Integrate(Fc, Integrator, Ts, X[:, k], U[:, k])
g = ca.vertcat(
g,
X[:, k+1] - Xnext,
)
lbg = ca.vertcat(
lbg,
ca.GenSX_zeros(nX, 1),
)
ubg = ca.vertcat(
ubg,
ca.GenSX_zeros(nX, 1),
)
if Gk is not None:
for k in range(N):
g_tmp, lbg_tmp, ubg_tmp = Gk(X[:,k], U[:,k], P)
g = ca.vertcat(g, g_tmp)
lbg = ca.vertcat(lbg, lbg_tmp)
ubg = ca.vertcat(ubg, ubg_tmp)
if Gn is not None:
g_tmp, lbg_tmp, ubg_tmp = Gn(X[:,N], P)
g = ca.vertcat(g, g_tmp)
lbg = ca.vertcat(lbg, lbg_tmp)
ubg = ca.vertcat(ubg, ubg_tmp)
lbx = ca.vertcat()
ubx = ca.vertcat()
for k in range(N):
lbx = ca.vertcat(
lbx,
ca.vertcat(*uLow)
)
ubx = ca.vertcat(
ubx,
ca.vertcat(*uUpp)
)
for k in range(N+1):
lbx = ca.vertcat(
lbx,
-ca.inf*ca.GenSX_ones(nX, 1)
)
ubx = ca.vertcat(
ubx,
ca.inf*ca.GenSX_ones(nX, 1)
)
z = ca.vertcat(
U.reshape((-1, 1)),
X.reshape((-1, 1))
)
pIn = ca.vertcat(X0, Xref, Q.reshape((-1, 1)), Qt.reshape((-1, 1)), R.reshape((-1, 1)), P) if nP > 0 else ca.vertcat(X0, Xref, Q.reshape((-1, 1)), Qt.reshape((-1, 1)), R.reshape((-1, 1)))
if GGN:
nlp = nlp2GGN(z, J, g, lbg, ubg, pIn)
nlp['p'] = ca.vertcat(
nlp['p'],
nlp['zOp'],
)
else:
nlp = {
'x': z,
'f': ca.norm_2(J)**2,
'g': g,
'lbg': lbg,
'ubg': ubg,
'p': pIn
}
MPC_prob = {
'x': nlp['x'],
'f': nlp['f'],
'g': nlp['g'],
'p': nlp['p']
}
if solver=='qrqp':
optTemp = {'qpsol': 'qrqp'}
if Options is not None:
Options.update(optTemp)
else:
Options = optTemp
S = ca.nlpsol('S', 'sqpmethod', MPC_prob, Options)
    elif solver=='ipopt':
        if 'tol' not in Options.keys():
            Options.update({'tol': 1e-4})
        if 'max_iter' not in Options.keys():
            Options.update({'max_iter': 100})
        S = ca.nlpsol('S', 'ipopt', MPC_prob, Options)
S = ca.nlpsol('S', 'ipopt', MPC_prob, Options)
zGuess = ca.MX.sym('zGuess', MPC_prob['x'].shape)
X0p = ca.MX.sym('X0p', nX, 1)
if nP > 0:
Pp = ca.MX.sym('Pp', nP, 1)
Xrefp = ca.MX.sym('Xrefp', nY, 1)
Qp = ca.MX.sym('Qp', nY, nY)
Qtp = ca.MX.sym('Qtp', nY, nY)
Rp = ca.MX.sym('Rp', nU, nU)
    pVal = ca.vertcat(X0p, Xrefp, Qp.reshape((-1, 1)), Qtp.reshape((-1, 1)), Rp.reshape((-1, 1)), Pp) if nP > 0 else ca.vertcat(X0p, Xrefp, Qp.reshape((-1, 1)), Qtp.reshape((-1, 1)), Rp.reshape((-1, 1)))
if GGN:
zOpp = ca.MX.sym('zOpp', z.shape)
pVal = ca.vertcat(
pVal,
zOpp
)
r = S(
x0=zGuess,
p=pVal,
lbg=casadi2List(lbg),
ubg=casadi2List(ubg),
lbx=casadi2List(lbx),
ubx=casadi2List(ubx),
)
In = [
zGuess,
X0p,
Xrefp,
Qp,
Qtp,
Rp]
InName = [
'zGuess',
'x0',
'xref',
'Q',
'Qt',
'R']
if nP > 0:
In += [Pp]
InName += ['P']
if GGN:
In.append(zOpp)
InName.append('zOp')
Out = [r['x']]
OutName = ['zOut']
Out.append(r['x'][0:nU])
OutName.append('uCalc')
Out.append(r['f'])
OutName.append('Cost')
return In, Out, InName, OutName |
# Generated by Django 2.2.5 on 2020-10-12 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nsp_project_app', '0024_notification_status'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='status',
field=models.BooleanField(blank=True, default=0, max_length=10, null=True),
),
]
|
from typing import List
import collections


class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
event = []
for i in range(len(trips)):
nums, from_ , to_ = trips[i]
event.append([from_, nums])
event.append([to_, -nums])
event.sort()
cnt = 0
for i in range(len(event)):
cnt += event[i][1]
if cnt > capacity:
return False
return True
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
event = collections.defaultdict(int)
for num, begin, end in trips:
event[begin] += num
event[end] -= num
keys = sorted(event.keys())
passengers = 0
for key in keys:
passengers += event[key]
if passengers > capacity:
return False
return True
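if __name__ == "__main__":
    # Quick check of the sweep-line approach (example trips are illustrative):
    print(Solution().carPooling([[2, 1, 5], [3, 3, 7]], 4))  # expected: False
    print(Solution().carPooling([[2, 1, 5], [3, 3, 7]], 5))  # expected: True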
|
"""Contains a cog that fetches colors."""
# pylint: disable=C0103
import colorsys
import secrets
import colour
from sailor import commands
from sailor.exceptions import UserInputError
import webcolors
from sailor_fox.helpers import FancyMessage
BASE_URL_COLOR_API = "https://www.colourlovers.com/img/{0}/{1}/{2}/"
BASE_URL_TINEYE_MULTICOLR = "https://labs.tineye.com/multicolr/#colors={0};weights=100"
BASE_URL_COLOR_HEX = "https://www.color-hex.com/color/{0}"
BASE_URL_ENCYCOLORPEDIA = "https://encycolorpedia.com/{0}"
def rgb_to_hsv(red, green, blue):
"""Convert an RGB tuple to an HSV tuple."""
hue, saturation, value = colorsys.rgb_to_hsv(red/255, green/255, blue/255)
return int(hue*360), int(saturation*100), int(value*100)
def rgb_to_hls(red, green, blue):
"""Convert an RGB tuple to an HLS tuple."""
hue, lightness, saturation = colorsys.rgb_to_hls(red/255, green/255, blue/255)
return int(hue*360), int(lightness*100), int(saturation*100)
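# Example outputs of the converters above:
#   rgb_to_hsv(255, 0, 0) -> (0, 100, 100)
#   rgb_to_hls(255, 0, 0) -> (0, 50, 100)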
def color_from_string(color):
"""Given a string, get color info."""
try:
color = webcolors.name_to_hex(color)
except (ValueError, AttributeError):
pass
try:
if not color:
color = "#" + hex(secrets.randbelow(16777216)).replace("0x", "", 1)
color = colour.Color(color)
except Exception as error:
raise UserInputError((
"Not a valid color. Color must either be:\n"
"A) In hex format and between FFFFFF and 000000, or\n"
"B) A named CSS color (e.g. red or lightsteelblue)."
)) from error
return color
@commands.cooldown(6, 12)
@commands.command(aliases=["simplecolour", "scolor", "scolour"])
async def simplecolor(event, *, color: str = None):
"""Display a color, without detailed info. Accepts CSS color names and hex input.
* `color` - Either a CSS color or hex input.
"""
color = color_from_string(color)
message = []
message.append(event.f.bold(color.hex))
message.append(BASE_URL_COLOR_API.format(color.hex.lstrip("#"), 88, 88))
await event.reply("\n".join(message))
@commands.cooldown(6, 12)
@commands.command(name="color", aliases=["colour"])
async def color_(event, *, color: str = None):
"""Display a color, with detailed info. Accepts CSS color names and hex input.
* `color` - Either a CSS color or hex input.
"""
color = color_from_string(color)
color_hex_value = color.hex.lstrip("#")
message = FancyMessage(event.f)
color_as_rgb = tuple(round(255*x) for x in color.rgb)
color_as_rgba = color_as_rgb + (1.0,)
message.add_field(name="RGB", value=f"rgb{color_as_rgb}")
message.add_field(name="RGBA", value=f"rgba{color_as_rgba}")
message.add_field(name="HSV", value=rgb_to_hsv(*color_as_rgb))
message.add_field(name="HLS", value=rgb_to_hls(*color_as_rgb))
message.add_field(name="Hex", value=f"#{color_hex_value}")
message.add_field(name="Images", value=event.f.no_embed_link(BASE_URL_TINEYE_MULTICOLR.format(color_hex_value)))
information_links = (f"{event.f.no_embed_link(BASE_URL_COLOR_HEX.format(color_hex_value))}\n"
f"{event.f.no_embed_link(BASE_URL_ENCYCOLORPEDIA.format(color_hex_value))}")
message.add_line(event.f.bold("Information:"))
message.add_line(information_links)
message.add_line(event.f.bold("Note:"))
message.add_line("HSV and HLS may be slightly wrong due to floating point errors.")
image_url = BASE_URL_COLOR_API.format(color_hex_value, 88, 88)
message.add_line(image_url)
await event.reply(message)
|
# Variables
num_cont = num = val_maior = val_menor = media = val_media = 0
fim = 'S'
# Loop
while fim != 'N':
    # Read the number and ask whether to continue
num = int(input('Digite um valor: '))
fim = str(input('Você quer continuar? [S/N] ').upper())
    # Average bookkeeping
    # media counts how many values were entered; val_media is divided by it at the end
media += 1
    # val_media accumulates every number entered
val_media += num
    # num_cont counts how many times the while loop has run
num_cont += 1
    # Track the largest and smallest values
if num_cont == 1:
val_menor = val_maior = num
if val_maior < num:
val_maior = num
elif val_menor > num:
val_menor = num
# Now divide the accumulated sum by the count to get the average
media = val_media / media
# Output
print(f'A média dos valores é: {media:.1f}')
print(f'O maior valor foi o {val_maior}, o menor valor foi {val_menor}')
|
# Copyright 2017-2020 by the Viziphant team, see `doc/authors.rst`.
# License: Modified BSD, see LICENSE.txt for details.
from elephant.utils import check_same_units as check_same_units_single
def check_same_units(spiketrains):
if isinstance(spiketrains[0], (list, tuple)):
for sts in spiketrains:
# check within a population
check_same_units_single(sts)
# check that units match across populations
check_same_units_single([sts[0] for sts in spiketrains])
else:
# a list of neo.SpikeTrain
check_same_units_single(spiketrains)
|
import torch
from torch import nn
from torchdrug import core, layers
from torchdrug.layers import functional
from torchdrug.core import Registry as R
@R.register("models.GraphAF")
class GraphAutoregressiveFlow(nn.Module, core.Configurable):
"""
Graph autoregressive flow proposed in `GraphAF: a Flow-based Autoregressive Model for Molecular Graph Generation`_.
.. _GraphAF\: a Flow-based Autoregressive Model for Molecular Graph Generation:
https://arxiv.org/pdf/2001.09382.pdf
Parameters:
model (nn.Module): graph representation model
prior (nn.Module): prior distribution
use_edge (bool, optional): use edge or not
num_flow_layer (int, optional): number of conditional flow layers
num_mlp_layer (int, optional): number of MLP layers in each conditional flow
dequantization_noise (float, optional): scale of dequantization noise
"""
def __init__(self, model, prior, use_edge=False, num_layer=6, num_mlp_layer=2, dequantization_noise=0.9):
super(GraphAutoregressiveFlow, self).__init__()
self.model = model
self.prior = prior
self.use_edge = use_edge
self.input_dim = self.output_dim = prior.dim
self.dequantization_noise = dequantization_noise
assert dequantization_noise < 1
self.layers = nn.ModuleList()
for i in range(num_layer):
condition_dim = model.output_dim * (3 if use_edge else 1)
self.layers.append(layers.ConditionalFlow(self.input_dim, condition_dim,
[model.output_dim] * (num_mlp_layer - 1)))
def _standarize_edge(self, graph, edge):
if edge is not None:
edge = edge.clone()
if (edge[:, :2] >= graph.num_nodes.unsqueeze(-1)).any():
raise ValueError("Edge index exceeds the number of nodes in the graph")
edge[:, :2] += (graph.num_cum_nodes - graph.num_nodes).unsqueeze(-1)
return edge
def forward(self, graph, input, edge=None, all_loss=None, metric=None):
"""
Compute the log-likelihood for the input given the graph(s).
Parameters:
graph (Graph): :math:`n` graph(s)
input (Tensor): discrete data of shape :math:`(n,)`
edge (Tensor, optional): edge list of shape :math:`(n, 2)`.
If specified, additionally condition on the edge for each input.
all_loss (Tensor, optional): if specified, add loss to this tensor
metric (dict, optional): if specified, output metrics to this dict
"""
if self.use_edge and edge is None:
raise ValueError("`use_edge` is true, but no edge is provided")
edge = self._standarize_edge(graph, edge)
node_feature = functional.one_hot(graph.atom_type, self.model.input_dim)
feature = self.model(graph, node_feature, all_loss, metric)
node_feature = feature["node_feature"]
graph_feature = feature["graph_feature"]
if self.use_edge:
condition = torch.cat([node_feature[edge], graph_feature.unsqueeze(1)], dim=1).flatten(1)
else:
condition = graph_feature
x = functional.one_hot(input, self.input_dim)
x = x + self.dequantization_noise * torch.rand_like(x)
log_dets = []
for layer in self.layers:
x, log_det = layer(x, condition)
log_dets.append(log_det)
log_prior = self.prior(x)
log_det = torch.stack(log_dets).sum(dim=0)
log_likelihood = log_prior + log_det
log_likelihood = log_likelihood.sum(dim=-1)
return log_likelihood # (batch_size,)
def sample(self, graph, edge=None, all_loss=None, metric=None):
"""
Sample discrete data based on the given graph(s).
Parameters:
graph (Graph): :math:`n` graph(s)
edge (Tensor, optional): edge list of shape :math:`(n, 2)`.
If specified, additionally condition on the edge for each input.
all_loss (Tensor, optional): if specified, add loss to this tensor
metric (dict, optional): if specified, output metrics to this dict
"""
if self.use_edge and edge is None:
raise ValueError("`use_edge` is true, but no edge is provided")
edge = self._standarize_edge(graph, edge)
node_feature = functional.one_hot(graph.atom_type, self.model.input_dim)
feature = self.model(graph, node_feature, all_loss, metric)
node_feature = feature["node_feature"]
graph_feature = feature["graph_feature"]
if self.use_edge:
condition = torch.cat([node_feature[edge], graph_feature.unsqueeze(1)], dim=1).flatten(1)
else:
condition = graph_feature
x = self.prior.sample(len(graph))
for layer in self.layers[::-1]:
x, log_det = layer.reverse(x, condition)
output = x.argmax(dim=-1)
return output # (batch_size,) |
'''
We need to:
1. do something
2. save the result to the database
3. log the whole process
Instead of repeating these steps everywhere, we define a facade class that exposes a single method to do all of it.'''
class SomeBusiness():
def DoSomething(self):
pass
class SomeRepository():
def SaveSomething(self, thing):
pass
class SomeLogger:
def Log(self, message):
pass
class SomeFacade:
def __init__(self):
self.business = SomeBusiness()
self.repository = SomeRepository()
self.logger = SomeLogger()
def DoSomething(self):
done = self.business.DoSomething()
self.logger.Log("done")
saved = self.repository.SaveSomething(done)
self.logger.Log("saved")
def main():
facade = SomeFacade()
facade.DoSomething()
if __name__ == "__main__":
main()
|
# AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/11_modeling.ipynb (unless otherwise specified).
__all__ = ['MultiModalBertModel']
# Cell
# nbdev_comment from __future__ import absolute_import, division, print_function
import json
import tensorflow as tf
import transformers
from loguru import logger
from .params import Params
from .utils import (get_embedding_table_from_model,
get_shape_list, load_transformer_model)
from .embedding_layer.base import DefaultMultimodalEmbedding
class MultiModalBertModel(tf.keras.Model):
def __init__(self, params: Params, use_one_hot_embeddings=False):
super(MultiModalBertModel, self).__init__()
self.params = params
if self.params.init_weight_from_huggingface:
self.bert_model = load_transformer_model(
self.params.transformer_model_name, self.params.transformer_model_loading)
else:
self.bert_model = load_transformer_model(
self.params.bert_config, self.params.transformer_model_loading)
self.bert_model(tf.convert_to_tensor(
transformers.file_utils.DUMMY_INPUTS))
self.use_one_hot_embeddings = use_one_hot_embeddings
# multimodal input dense
self.embedding_layer = self.bert_model.get_input_embeddings()
self.multimoda_embedding = self.params.embedding_layer['model'](
params=self.params, embedding_layer=self.embedding_layer)
@tf.function
def call(self, inputs, training=False):
emb_inputs, embedding_tup = self.multimoda_embedding(inputs, training)
self.embedding_output = embedding_tup.word_embedding
self.model_input_mask = embedding_tup.res_input_mask
self.model_token_type_ids = embedding_tup.res_segment_ids
outputs = self.bert_model(
{'input_ids': None,
'inputs_embeds': self.embedding_output,
'attention_mask': self.model_input_mask,
'token_type_ids': self.model_token_type_ids,
'position_ids': None},
training=training
)
self.sequence_output = outputs.last_hidden_state
if 'pooler_output' in outputs:
self.pooled_output = outputs.pooler_output
else:
# no pooled output, use mean of token embedding
self.pooled_output = tf.reduce_mean(
outputs.last_hidden_state, axis=1)
outputs['pooler_output'] = self.pooled_output
self.all_encoder_layers = tf.stack(outputs.hidden_states, axis=1)
outputs = {k: v for k, v in outputs.items() if k not in (
'hidden_states', 'attentions')}
outputs['model_input_mask'] = self.model_input_mask
outputs['model_token_type_ids'] = self.model_token_type_ids
outputs['all_encoder_layers'] = self.all_encoder_layers
outputs['embedding_output'] = self.embedding_output
outputs['embedding_table'] = self.embedding_layer.weights[0]
return emb_inputs, outputs
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return get_embedding_table_from_model(self.bert_model)
def get_input_mask(self):
return self.model_input_mask
def get_token_type_ids(self):
return self.model_token_type_ids
|
# Copyright 2015 Michael DeHaan <michael.dehaan/gmail>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import boto.ec2
import boto.ec2.blockdevicemapping
import socket
import time
from strider.common.instance_data import InstanceData, SshData
import strider.common.logger
class EC2(object):
# TODO: remove _instance_id hack if possible
def __init__(self, name=None, region=None, access_key_id=None,
secret_access_key=None, security_token=None, image_id=None,
instance_type=None, key_name=None, security_groups=None, subnet_id=None,
ssh=None, user_data=None, tags=None, instance_profile_name=None,
block_device_map=None, bake_name=None, bake_description=None,
profile_name=None):
self.name = name
self.region = region
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.security_token = security_token
self.image_id = image_id
self.instance_type = instance_type
self.key_name = key_name
self.security_groups = security_groups
self.subnet_id = subnet_id
self.ssh = ssh
self.user_data = user_data
self.tags = tags
self.instance_profile_name = instance_profile_name
self.block_device_map = block_device_map
self.bake_name = bake_name
self.bake_description = bake_description
self.profile_name = profile_name
# utility instances
        self.log = strider.common.logger.get_logger('EC2')
# check for required args
if not self.name:
raise Exception("'name' is required")
if not self.instance_type:
raise Exception("'instance_type' is required")
if self.tags and type(self.tags) != dict:
raise Exception("expecting 'tags' to be a dictionary")
if type(self.ssh) != dict:
raise Exception("expecting 'ssh' to be a dictionary")
if not self.security_groups:
raise Exception("'security_groups' are required")
# coerce inputs
self.tags['Name'] = self.name
self.block_device_map = self._transform_block_device_map()
self.connection = self._connect()
# --------------------------------------------------------------------------
# PUBLIC VIRT API INTERFACE
# --------------------------------------------------------------------------
def exists(self):
""" Is the requested instance available? If no, spun it up. """
return self.describe().present
# --------------------------------------------------------------------------
def describe(self):
""" Return details about the instance. Standardized between cloud providers """
details = self._details()
if details is None:
return InstanceData(present=False)
else:
username = self.ssh['username']
private_key_path = self.ssh['private_key_path']
public_ip = self.ssh.get('public_ip', True)
port = self.ssh.get('port', 22)
host = details.ip_address
if not public_ip:
host = details.private_ip_address
ssh_data = SshData(keyfile=private_key_path, user=username, host=host, port=port)
return InstanceData(present=True, provider_specific=details, ssh=ssh_data)
# --------------------------------------------------------------------------
def bake(self):
""" Create cloud images from an already running instance """
self.log("baking AMI")
instance_id = self.describe().provider_specific.id
ami_id = self.connection.create_image(instance_id, self.bake_name,
self.bake_description, no_reboot=True,
block_device_mapping=self.block_device_map)
self.log("AMI ID: %s" % ami_id)
return ami_id
# --------------------------------------------------------------------------
def up(self):
""" Instantiate instances if needed, otherwise just start them """
self.log("determining if we need to create an instance")
me = self.describe().provider_specific
if me is None:
self.log("creating an instance")
reservation = self.connection.run_instances(
image_id = self.image_id, min_count = 1, max_count = 1,
key_name = self.key_name, user_data = self.user_data,
addressing_type = None, subnet_id = self.subnet_id,
instance_type = self.instance_type,
instance_profile_name = self.instance_profile_name,
security_group_ids = self.security_groups,
block_device_map = self.block_device_map
)
self.log("instance created")
self._tag_instances(reservation)
self._start_instances(reservation)
else:
self.log("instance already exists, starting if needed")
self.connection.start_instances([me.id])
me = self.describe()
if not me.present:
raise Exception("unexpectedly can't find the instance.")
# --------------------------------------------------------------------------
def destroy(self):
""" Destroy the described instance """
self.log("looking for instances to destroy")
me = self.describe()
if me.present:
self.log("destroying instance")
self.connection.terminate_instances(instance_ids=[me.provider_specific.id])
self.log("instance destroyed")
else:
self.log("no instance found to destroy")
# --------------------------------------------------------------------------
# PRIVATE FUNCTIONS
# --------------------------------------------------------------------------
def _connect(self):
""" Connect to the cloud provider, raising an exception on failure """
self.log("connecting...")
conn = boto.ec2.connect_to_region(
self.region,
aws_access_key_id = self.access_key_id,
aws_secret_access_key = self.secret_access_key,
security_token = self.security_token,
profile_name = self.profile_name
)
self.log("connected")
return conn
# --------------------------------------------------------------------------
def _details(self):
""" Return the cloud provider's info about the described instance"""
reservations = self.connection.get_all_instances(
instance_ids=None, filters=None, dry_run=False, max_results=None)
# find the first matching instance that is not terminating/terminated
for reservation in reservations:
for instance in reservation.instances:
if "Name" in instance.tags and instance.tags["Name"] == self.name:
if instance.state not in [ 'terminating', 'terminated', 'pending', 'shutting-down' ]:
return instance
return None
# --------------------------------------------------------------------------
def _start_instances(self, reservation):
""" Start the instances, harmless if not already running. """
self.log("starting instance")
instance_ids = [ x.id for x in reservation.instances ]
self.connection.start_instances(instance_ids, dry_run=False)
self.log("waiting for instance to start: %s (waiting 10 seconds...)" % instance_ids)
while True:
time.sleep(10)
reservations = self.connection.get_all_instances(instance_ids=instance_ids)
for reservation in reservations:
for instance in reservation.instances:
if instance.state == 'running':
self.log("instance has started")
return
# --------------------------------------------------------------------------
def _transform_block_device_map(self):
""" Reformat the user-friendly device map from striderfile into something boto likes """
if self.block_device_map is None:
return None
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
for (k,v) in self.block_device_map.iteritems():
bdm[k] = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
bdm[k].size = v['size']
for prop in [ 'ephemeral_name', 'no_device', 'volume_id',
'snapshot_id', 'status', 'attach_time', 'delete_on_termination',
'size', 'volume_type', 'iops', 'encrypted' ]:
if prop in v:
self.log("EBS property: %s => %s" % (prop, v[prop]))
setattr(bdm[k], prop, v[prop])
return bdm
# --------------------------------------------------------------------------
def _tag_instances(self, reservation):
""" Apply specified tags to the instances """
self.log("tagging instance, tags=%s" % self.tags)
for instance in reservation.instances:
self.connection.create_tags([instance.id], self.tags)
self.log("tagging complete")
|
#!/usr/bin/env python
# Code for Brian Corteil's Tiny 4WD robot, based on his original code,
# but using the approxeng libraries to allow genuine PS3 support
# (see https://github.com/ApproxEng/approxeng.input.git)
# Imported from https://github.com/EmmaNorling/Tiny4WD/blob/master/TinyPirate.py
# Load library functions we want
from approxeng.input.selectbinder import ControllerResource
from time import sleep
from explorerhat import motor
# Setup
maxPower = 1.0
power_left = 0.0
power_right = 0.0
x_axis = 0.0
y_axis = 0.0
def mixer(inYaw, inThrottle,):
left = inThrottle + inYaw
right = inThrottle - inYaw
scaleMax = max(1, abs(left), abs(right))
results = [left/scaleMax, right/scaleMax]
return results
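# Worked example of the mixer above: mixer(0.5, 1.0) gives left = 1.5, right = 0.5,
# scaleMax = 1.5, so the returned pair is [1.0, 0.333...] (scaled back into [-1, 1]).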
try:
print('Press CTRL+C to quit')
# Loop indefinitely
with ControllerResource(dead_zone=0.1, hot_zone=0.2) as joystick:
while True:
# Loop, printing the corrected value from the left stick
x_axis = joystick['lx']
y_axis = joystick['ly']
# Don't be too spammy!
sleep(0.1)
mixer_results = mixer(x_axis, y_axis)
#print (mixer_results)
power_left = int(mixer_results[0]*100)
power_right = int(mixer_results[1]*100)
#print("left: " + str(power_left) + " right: " + str(power_right))
motor.one.speed((-power_right * maxPower))
motor.two.speed(power_left * maxPower)
except KeyboardInterrupt:
# CTRL+C exit, disable all drives
print("stop")
motor.stop()
print("bye")
|
'''
Author: flwfdd
Date: 2022-01-03 13:44:03
LastEditTime: 2022-01-20 23:39:25
Description: Configuration file
_(:з」∠)_
'''
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 Edg/80.0.361.66',
}
api_base_url = {
"C": "http://x.x.x.x:3000", # 网易云音乐API
"Q": "http://x.x.x.x:3300", # QQ音乐API
}
# Bilibili song cache: tracks have to be stored somewhere and served via a URL.
# Aliyun OSS is used here; swap it for another backend if needed.
if True:  # use Aliyun OSS
import oss2
oss_auth = oss2.Auth('xxx',
'xxx')
oss_bucket = oss2.Bucket(
oss_auth, 'http://oss-cn-hangzhou.aliyuncs.com', 'xxx')
oss_path = "ori/bili/"
oss_url = "https://xxx.oss-cn-hangzhou.aliyuncs.com/"+oss_path
    # Only the following functions need to be implemented.
    # Check whether the cached file exists; return its URL if it does, otherwise an empty string.
def check_tmp(filename):
if oss_bucket.object_exists(oss_path+filename):
return oss_url+filename+"?x-oss-traffic-limit=819200"
else:
return ""
    # Store the file and return its URL.
def save_tmp(filename, bin):
oss_bucket.put_object(oss_path+filename, bin)
return oss_url+filename+"?x-oss-traffic-limit=819200"
else:  # use Tencent Cloud COS
from qcloud_cos_v5 import CosConfig
from qcloud_cos_v5 import CosS3Client
    secret_id = ''  # Replace with your SecretId; view and manage it in the CAM console: https://console.cloud.tencent.com/cam/capi
    secret_key = ''  # Replace with your SecretKey; view and manage it in the CAM console: https://console.cloud.tencent.com/cam/capi
    region = 'ap-chengdu'  # Replace with your bucket's region; it is shown in the COS console: https://console.cloud.tencent.com/cos5/bucket
    # The full list of regions supported by COS: https://cloud.tencent.com/document/product/436/6224
    token = None  # Not needed for permanent keys; required for temporary keys, see https://cloud.tencent.com/document/product/436/14048
    scheme = 'https'  # Protocol used to access COS (http/https); defaults to https and can be omitted
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)
client = CosS3Client(config)
cos_path = "bili/"
cos_url = "https://xxx.cos.ap-chengdu.myqcloud.com/"+cos_path
    # Only the following functions need to be implemented.
    # Check whether the cached file exists; return its URL if it does, otherwise an empty string.
def check_tmp(filename):
if client.object_exists("xxx",cos_path+filename):
return cos_url+filename+"?x-cos-traffic-limit=819200"
else:
return ""
    # Store the file and return its URL.
def save_tmp(filename, bin):
client.put_object("xxx",bin,cos_path+filename)
return cos_url+filename+"?x-cos-traffic-limit=819200"
# NetEase Cloud Music account cookie
C_vip_cookie = ""
# QQ Music account cookie
Q_vip_cookie = "" |
from copy import copy
import os
import numpy as np
import scipy.signal as _signal
import scipy.interpolate as _interp
from scipy.signal import hilbert as analytic
from scipy import ndimage
def gauss2(X, Y, mu, sigma, normalize=True):
""" Evaluates Gaussian over points of X,Y
"""
# evaluates Gaussian over X,Y
D = sigma[0, 0]*sigma[1, 1] - sigma[0, 1]*sigma[1, 0]
B = np.linalg.inv(sigma)
X = X - mu[0]
Y = Y - mu[1]
Z = B[0, 0]*X**2. + B[0, 1]*X*Y + B[1, 0]*X*Y + B[1, 1]*Y**2.
Z = np.exp(-0.5*Z)
if normalize:
Z *= (2.*np.pi*np.sqrt(D))**(-1.)
return Z
def backtrack2(f0, g0, x1, f1, b1=0.1, b2=0.5):
""" Safeguarded parabolic backtrack
"""
# parabolic backtrack
x2 = -g0*x1**2/(2*(f1-f0-g0*x1))
# apply safeguards
if x2 > b2*x1:
x2 = b2*x1
elif x2 < b1*x1:
x2 = b1*x1
return x2
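# Worked example for backtrack2 above: with f(x) = (x - 1)**2, f0 = f(0) = 1, g0 = f'(0) = -2,
# a trial step x1 = 0.5 with f1 = 0.25 gives the parabolic estimate x2 = 1.0,
# which the safeguard then clips to b2*x1 = 0.25.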
def backtrack3(f0, g0, x1, f1, x2, f2):
""" Safeguarded cubic backtrack
"""
raise NotImplementedError
def polyfit2(x, f):
# parabolic fit
i = np.argmin(f)
p = np.polyfit(x[i-1:i+2], f[i-1:i+2], 2)
if p[0] > 0:
return -p[1]/(2*p[0])
else:
print(-1)
raise Exception()
def lsq2(x, f):
# parabolic least squares fit
p = np.polyfit(x, f, 2)
if p[0] > 0:
return -p[1]/(2*p[0])
else:
print(-1)
raise Exception()
def angle(x,y):
xy = dot(x,y)
xx = dot(x,x)
yy = dot(y,y)
return np.arccos(xy/(xx*yy)**0.5)
def dot(x,y):
return np.dot(
np.squeeze(x),
np.squeeze(y))
def hilbert(w):
return np.imag(analytic(w))
infinity = np.inf
### finite difference
def nabla(V, h=[]):
""" Returns sum of first-order spatial derivatives of a function defined on
a 2D rectangular grid; generalizes Laplacian
"""
W = np.zeros(V.shape)
if h==[]:
h = np.ones((V.ndim, 1))
# interior
W[1:-1,1:-1] += (V[1:-1,2:] - V[1:-1,:-2])/(2.*h[0])
W[1:-1,1:-1] += (V[2:,1:-1] - V[:-2,1:-1])/(2.*h[1])
# top/bottom edges
W[0,1:-1] = (V[1,1:-1] - V[0,1:-1])/h[1] + (V[0,2:] - V[0,:-2])/(2.*h[0])
W[-1,1:-1] = (V[-1,1:-1] - V[-2,1:-1])/h[1] + (V[-1,2:] - V[-1,:-2])/(2.*h[0])
# left/right edges
W[1:-1,0] = (V[2:,0] - V[:-2,0])/(2.*h[1]) + (V[1:-1,1] - V[1:-1,0])/h[0]
W[1:-1,-1] = (V[2:,-1] - V[:-2,-1])/(2.*h[1]) + (V[1:-1,-1] - V[1:-1,-2])/h[0]
# corners
W[0,0] = (V[1,0] - V[0,0])/h[1] + (V[0,1] - V[0,0])/h[0]
W[0,-1] = (V[1,-1] - V[0,-1])/h[1] + (V[0,-2] - V[0,-1])/h[0]
W[-1,0] = (V[-2,0] - V[-1,0])/h[1] + (V[-1,1] - V[-1,0])/h[0]
W[-1,-1] = (V[-1,-1] - V[-2,-1])/h[1] + (V[-1,-1] - V[-1,-2])/h[0]
return W
def nabla2(V, h=[]):
""" Returns sum of second-order spatial derivatives of a function defined on
a 2D rectangular grid; generalizes Laplacian
"""
W = np.zeros(V.shape)
if h==[]:
h = np.ones((V.ndim, 1))
# interior
W[1:-1,1:-1] += (V[1:-1,2:] -2.*V[1:-1,1:-1] + V[1:-1,:-2])/h[0]**2
W[1:-1,1:-1] += (V[2:,1:-1] -2.*V[1:-1,1:-1] + V[:-2,1:-1])/h[1]**2
    # top/bottom edges
    W[0,1:-1] = W[1,1:-1]
    W[-1,1:-1] = W[-2,1:-1]
    # left/right edges
    W[1:-1,0] = W[1:-1,1]
    W[1:-1,-1] = W[1:-1,-2]
# corners
W[0,0] = (W[0,1] + W[1,0])/2
W[0,-1] = (W[0,-2] + W[1,-1])/2
W[-1,0] = (W[-1,1] + W[-2,0])/2
W[-1,-1] = (W[-1,-2] + W[-2,-1])/2
return W
def grad(V, h=[]):
""" Evaluates derivatives on a 2D rectangular grid
"""
ny, nx = V.shape
X = np.zeros((ny, nx))
Y = np.zeros((ny, nx))
if h==[]:
h = np.ones((V.ndim, 1))
# interior
X[:,1:-1] = (V[:,2:] - V[:,:-2])/(2.*h[0])
Y[1:-1,:] = (V[2:,:] - V[:-2,:])/(2.*h[1])
# left/right edges
    X[:,0] = (V[:,1] - V[:,0])/h[0]
    X[:,-1] = (V[:,-1] - V[:,-2])/h[0]
    # top/bottom edges
    Y[0,:] = (V[1,:] - V[0,:])/h[1]
    Y[-1,:] = (V[-1,:] - V[-2,:])/h[1]
return X,Y
def tv(Z, h=[], epsilon=1.e-6):
nrow = Z.shape[0]
    ncol = Z.shape[1]
    if h==[]:
        h = np.ones((Z.ndim, 1))
    Zx = (Z[:,1:] - Z[:,:-1])/h[0]
    Zy = (Z[1:,:] - Z[:-1,:])/h[1]
top = np.zeros((nrow, ncol))
bot = np.zeros((nrow, ncol))
top[:,1:] += Zx
top[1:,:] += Zy
top[ :,-1] += Zx[:,-1]
top[-1, :] += Zy[-1,:]
bot[:,1:] += Zx**2
bot[1:,:] += Zy**2
bot[ :,-1] += Zx[:,-1]**2
bot[-1, :] += Zy[-1,:]**2
return top/(bot + epsilon*bot.max())**0.5
def get_mute_ratio(radius, maxradius, minratio):
# ratio = np.zeros(len(radius))
ratio = 1 - (1-minratio)*np.cos(np.pi/(2*maxradius)*radius)
ratio[np.where(radius>maxradius)] = 1
return ratio
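# Added note: with the cosine taper above, ratio equals minratio at radius 0,
# rises smoothly to 1 at radius == maxradius, and is forced to 1 (no muting)
# everywhere beyond maxradius.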
def mysmooth(v, x, z, nx, nz, smo, fill, xlim=None, zlim=None, topolim=None):
""" Interpolates from an unstructured coordinates (mesh) to a structured
coordinates (grid)
"""
if xlim is None:
xmax = x.max()
xmin = x.min()
lx = xmax - xmin
else:
xmax = xlim[1]
xmin = xlim[0]
lx = xmax - xmin
if zlim is None:
zmax = z.max()
zmin = z.min()
lz = zmax - zmin
else:
zmax = zlim[1]
zmin = zlim[0]
lz = zmax - zmin
mesh = _stack(x, z)
dx = lx / (nx-1)
dz = lz / (nz-1)
# construct structured grid
x_grid = np.linspace(xmin, xmax, nx)
if topolim is not None:
z1 = np.linspace(zmin, topolim, nz)
z2 = np.linspace(topolim, zmax, nz * 2)
z_grid = np.hstack((z1, z2))
else:
z_grid = np.linspace(zmin, zmax, nz)
X, Z = np.meshgrid(x_grid, z_grid)
grid = _stack(X.flatten(), Z.flatten())
# interpolate to structured grid
v_tmp = myinterpolate(mesh, v, grid)
V = np.reshape(v_tmp, (len(z_grid), len(x_grid)))
# print(np.max(V))
# print(np.min(V))
# print('dx:%f'%dx)
# print('dz:%f'%dz)
if fill == 'constant':
cval = np.mean(V)
V = ndimage.gaussian_filter(V,sigma=smo,mode=fill,cval=cval)
elif fill == None:
V = ndimage.gaussian_filter(V, sigma=smo)
else:
V = ndimage.gaussian_filter(V, sigma=smo, mode=fill)
mesh1 = _stack(X.flatten(), Z.flatten())
grid1 = _stack(x, z)
v1 = V.flatten()
v_new = myinterpolate(mesh1, v1, grid1)
return v_new
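# Illustrative call (values are made up): smooth a field v defined on mesh
# coordinates (x, z) by resampling onto a 100 x 50 grid, applying a Gaussian
# kernel of 3 grid cells, then interpolating back to the mesh:
#   v_smooth = mysmooth(v, x, z, nx=100, nz=50, smo=3, fill=None)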
def myinterpolate(mesh,v, grid):
V = _interp.griddata(mesh, v, grid, 'linear')
if np.any(np.isnan(V)):
W = _interp.griddata(mesh, v, grid, 'nearest')
for i in np.where(np.isnan(V)):
V[i] = W[i]
return V
def _stack(*args):
return np.column_stack(args) |
from databricks_cli.runs.api import RunsApi
from databricks_cli.workspace.api import WorkspaceApi
import os
import time
import base64
class PipelineRun:
def __init__(self, waiter, run_id):
self._waiter = waiter
self._run_id = run_id
def wait_for_completion(self, show_output=True, timeout_seconds=3600):
self._waiter(self._run_id, show_output, timeout_seconds)
class PipelineException(Exception):
"""An exception thrown when a pipeline fails."""
class Pipeline:
def __init__(self, workspace, steps: list):
self._workspace = workspace
self._steps = steps
self._runs = RunsApi(self._workspace._client)
def validate(self):
return True
def publish(self, name, description, version):
header = [
"# Databricks notebook source\n",
"# MAGIC %md\n",
f"# MAGIC # {name} ({version})\n",
f"# MAGIC {description}\n",
"\n",
"# COMMAND ----------\n",
"\n",
]
lines = []
libraries = set([])
for step in self._steps:
source = os.path.join(step.source_directory, step.script_name)
with open(source, "r") as f:
lines.append(f"# DBTITLE 1,{step.name}\n")
for line in f.readlines():
lines.append(line)
lines.append("\n")
lines.append("# COMMAND ----------\n")
lines.append("\n")
libraries |= set(step.runconfig.environment.get_libraries())
self._header = header
self._lines = lines
self._libraries = libraries
def submit(self, experiment_name) -> PipelineRun:
workspace_client = WorkspaceApi(self._workspace._client)
workspace_client.mkdirs("/Pipelines")
workspace_client.mkdirs("/Experiments")
lines = self._header
lines += [
"import mlflow\n",
f'mlflow.set_experiment("/Experiments/{experiment_name}")\n',
"\n",
"# COMMAND ----------\n",
"\n",
]
lines += self._lines
notebook = str.join("", lines)
path = f"/Pipelines/{experiment_name}"
rsp = self._workspace._client.perform_query(
method="POST",
path="/workspace/import",
data={
"content": base64.b64encode(notebook.encode("utf-8")).decode(
"utf-8"
),
"path": path,
"language": "PYTHON",
"overwrite": True,
"format": "SOURCE",
},
)
cluster_id = self._steps[0].compute_target.get("cluster_id")
json = {
"existing_cluster_id": cluster_id,
"notebook_task": {"notebook_path": path},
}
if len(self._libraries) > 0:
json["libraries"] = [{"pypi": {"package": p}} for p in self._libraries]
rsp = self._runs.submit_run(json)
return PipelineRun(
lambda r, o, t: self.wait(r, o, t), rsp.get("run_id")
)
def wait(self, run_id, show_output, timeout_seconds):
while timeout_seconds > 0:
rsp = self._runs.get_run(str(run_id))
state = rsp.get("state", {}).get("life_cycle_state")
if (
show_output is not None
and isinstance(show_output, str)
and show_output.lower() == "full"
):
print(rsp)
elif show_output:
print(state)
if state == "SKIPPED":
raise PipelineException("Job skipped")
if state == "INTERNAL_ERROR":
reason = rsp.get("reason")
raise PipelineException(f"Internal Error: {reason}")
if state == "TERMINATED":
if rsp.get("state", {}).get("result_state") == "FAILED":
run_url = rsp.get("run_page_url")
raise PipelineException(
f"Execution failed: run_page_url={run_url}"
)
break
timeout_seconds -= 5
time.sleep(5)
if state != "TERMINATED" and timeout_seconds <= 0:
run_url = rsp.get("run_page_url")
raise PipelineException(
f"Execution timed out: run_page_url={run_url}"
)
output = self._runs.get_run_output(str(run_id))
if (
show_output is not None
and isinstance(show_output, str)
and show_output.lower() == "full"
):
print(output)
elif show_output:
print(output.get("notebook_output").get("result"))
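# Hypothetical end-to-end usage (object and step names are illustrative only):
#   pipeline = Pipeline(workspace, steps=[prep_step, train_step])
#   pipeline.publish(name="demo", description="prep + train", version="1.0")
#   run = pipeline.submit(experiment_name="demo")
#   run.wait_for_completion(show_output=True, timeout_seconds=1800)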
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError as err:
from distutils.core import setup
import os
def find_version():
for line in open(os.path.join('sgc', '__init__.py')):
if line.startswith('__version__'):
return line.split('=')[1].strip().strip('"').strip("'")
raise RuntimeError("Unable to find __version__ declaration")
version = find_version()
if __name__ == "__main__":
setup (
name = "SimpleGC",
version = version,
description = "GUI Library for Pygame",
author = "Sam Bull",
options = {
'sdist': {
'formats': ['gztar','zip'],
},
},
url = "https://launchpad.net/simplegc",
license = "BSD",
packages = ['sgc','sgc.widgets', 'sgc.widgets.composite', 'sgc.widgets._interface'],
package_dir = {
'sgc': 'sgc',
'sgc.widgets': os.path.join('sgc', 'widgets'),
'sgc.widgets.composite': os.path.join('sgc', 'widgets', 'composite'),
'sgc.widgets._interface': os.path.join('sgc', 'widgets', '_interface')
},
)
|
# Import modules.
import boto3
# Initiate AWS session with ec2-admin profile.
aws_session = boto3.session.Session(profile_name="inderpalaws02-ec2-admin")
# Initiate EC2 resource because collections exist for service resource.
ec2_resource = aws_session.resource(service_name="ec2",region_name="us-east-1")
# Initiate collection object.
ec2_collection = ec2_resource.instances
# print(dir(ec2_collection)) # Will print available functions for the object.
# In these functions, we'll focus on all(), limit(), filter() functions for now. Rest will be seen later.
# Print all instances using all() function. all() creates an iterable through which we'll iterate and print the instances.
print(f"\nAll ec2 instances are as below,")
ec2_iterator = ec2_collection.all()
for instance in ec2_iterator:
print(instance)
# Print only first x ec2 instances. Here, x=1.
print(f"\nThe first ec2 instance in the list of all instances is as below,")
ec2_iterator = ec2_collection.limit(1)
for instance in ec2_iterator:
print(instance)
# Print all instances using filter().
print(f"\nAll ec2 instances using filter(), are as below,")
ec2_iterator = ec2_collection.filter()
for instance in ec2_iterator:
print(instance)
# Print only the running instances using filter().
print(f"\nThe ec2 instances which are in running state are as below,")
filter1 = {"Name":"instance-state-name","Values":["running"]}
ec2_iterator = ec2_collection.filter(Filters=[filter1])
for instance in ec2_iterator:
print(instance)
# Print only the stopped instances using filter().
print(f"\nThe ec2 instances which are in stopped state are as below,")
filter1 = {"Name":"instance-state-name","Values":["stopped"]}
ec2_iterator = ec2_collection.filter(Filters=[filter1])
for instance in ec2_iterator:
print(instance)
# Print the running and stopped instances using filter().
print(f"\nThe ec2 instances which are in running and stopped state are as below,")
filter1 = {"Name":"instance-state-name","Values":["running","stopped"]}
ec2_iterator = ec2_collection.filter(Filters=[filter1])
for instance in ec2_iterator:
print(instance)
# Print the instances which are in stopped state and have instance type as t2.micro.
print(f"\nThe ec2 instances which are in stopped state and have instance type as t2.micro, are as below,")
filter1 = {"Name":"instance-state-name","Values":["stopped"]}
filter2 = {"Name":"instance-type","Values":["t2.micro"]}
ec2_iterator = ec2_collection.filter(Filters=[filter1,filter2])
for instance in ec2_iterator:
print(instance)
|
import vtk
import numpy as np
# Create points and cells for the spiral
nV = 256
nCyc = 10
rT1 = 0.2
rT2 = 0.5
rS = 4
h = 10
nTv = 8
points = vtk.vtkPoints()
for i in range(0, nV):
# Spiral coordinates
vX = rS * np.cos(2 * np.pi * nCyc * i / (nV - 1))
vY = rS * np.sin(2 * np.pi * nCyc * i / (nV - 1))
#vX = 10*np.cos(np.deg2rad(5*i))
vZ = h * i / nV
points.InsertPoint(i, vX, vY, vZ)
lines = vtk.vtkCellArray()
lines.InsertNextCell(nV)
for i in range(0, nV):
lines.InsertCellPoint(i)
polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetLines(lines)
# Varying tube radius along the spiral
tubeRadius = vtk.vtkDoubleArray()
tubeRadius.SetName("TubeRadius")
tubeRadius.SetNumberOfTuples(nV)
for i in range(0, nV):
tubeRadius.SetTuple1(i,rT1 + (rT2 - rT1) * np.sin(np.pi * i / (nV - 1)))
polyData.GetPointData().AddArray(tubeRadius)
polyData.GetPointData().SetActiveScalars("TubeRadius")
# RGB array (could add an alpha channel too I guess...)
# Varying from blue to red
colors = vtk.vtkUnsignedCharArray()
colors.SetName("Colors")
colors.SetNumberOfComponents(3)
colors.SetNumberOfTuples(nV)
for i in range(0, nV):
colors.InsertTuple3(i,
int(255 * i / (nV - 1)),
0,
int(255 * (nV - 1 - i) / (nV - 1)))
polyData.GetPointData().AddArray(colors)
tube = vtk.vtkTubeFilter()
tube.SetInputData(polyData)
tube.SetNumberOfSides(nTv)
tube.SetVaryRadiusToVaryRadiusByAbsoluteScalar()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
mapper.ScalarVisibilityOn()
mapper.SetScalarModeToUsePointFieldData()
mapper.SelectColorArray("Colors")
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.SetBackground(.2, .3, .4)
renderer.GetActiveCamera().Azimuth(30)
renderer.GetActiveCamera().Elevation(30)
renderer.ResetCamera()
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.AddRenderer(renderer)
renWin.SetSize(500, 500)
renWin.Render()
style = vtk.vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
iren.Start() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Author: Tim Henderson
#Email: [email protected]
#For licensing see the LICENSE file in the top level directory.
import os
from random import seed
from zss import (
simple_distance,
Node,
)
seed(os.urandom(15))
def test_empty_tree_distance():
assert simple_distance(Node(''), Node('')) == 0
assert simple_distance(Node('a'), Node('')) == 1
assert simple_distance(Node(''), Node('b')) == 1
def test_paper_tree():
A = (
Node("f")
.addkid(Node("d")
.addkid(Node("a"))
.addkid(Node("c")
.addkid(Node("b"))
)
)
.addkid(Node("e"))
)
B = (
Node("f")
.addkid(Node("c")
.addkid(Node("d")
.addkid(Node("a"))
.addkid(Node("b"))
)
)
.addkid(Node("e"))
)
#print A
#print
#print B
dist = simple_distance(A,B)
assert dist == 2
def test_simplelabelchange():
A = (
Node("f")
.addkid(Node("a")
.addkid(Node("h"))
.addkid(Node("c")
.addkid(Node("l"))))
.addkid(Node("e"))
)
B = (
Node("f")
.addkid(Node("a")
.addkid(Node("d"))
.addkid(Node("r")
.addkid(Node("b"))))
.addkid(Node("e"))
)
dist = simple_distance(A,B)
print dist
assert dist == 3
#print 'distance', d
def test_incorrect_behavior_regression():
A = (
Node("a")
.addkid(Node("b")
.addkid(Node("x"))
.addkid(Node("y"))
)
)
B = (
Node("a")
.addkid(Node("x"))
.addkid(Node("b")
.addkid(Node("y"))
)
)
dist = simple_distance(A, B)
print dist
assert dist == 2
|
import sys
from XenServer import XenServer
import traceback
try:
xs_session = XenServer().make_session()
pool = xs_session.xenapi.pool.get_all()[0]
pool_record = xs_session.xenapi.pool.get_record(pool)
sys.stdout.write("Successfully connected to pool: " + pool_record["name_label"]+'\n')
except Exception, e:
sys.stderr.write('ERROR: %s\n' % traceback.format_exc())
|
import torch.nn as nn
import torch.nn.functional as F
class Fpn(nn.Module):
def __init__(self):
super(Fpn, self).__init__()
self.upLayer1=nn.Sequential(
nn.Conv2d(512,256,1,1,0),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.upLayer2 = nn.Sequential(
nn.Conv2d(256, 256, 1, 1, 0),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.upLayer3 = nn.Sequential(
nn.Conv2d(128, 256, 1, 1, 0),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.center_size=7
def forward(self,x):
x0 = self.upLayer1(x[0])
x1 = self.upLayer2(x[1])
x2 = self.upLayer3(x[2])
x1=F.upsample_bilinear(x0,scale_factor=2)+x1
x2=F.upsample_bilinear(x1,scale_factor=2)+x2
if x2.size(3) < 20:
l = (x2.size(3) - self.center_size) // 2
r = l + self.center_size
x2 = x2[:, :, l:r, l:r]
else:
x2=F.upsample_bilinear(x1,size=(31,31))
x=x2
return x
class AdjustLayer(nn.Module):
def __init__(self, in_channels, out_channels, center_size=7):
super(AdjustLayer, self).__init__()
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
)
self.center_size = center_size
self.fpn=Fpn()
def forward(self, x):
x=self.fpn(x)
x = self.downsample(x)
return x |
#!/usr/bin/env python
# By Chris Paxton
# (c) 2017 The Johns Hopkins University
# See license for more details
import rospy
from costar_task_plan.robotics.tom.config import TOM_RIGHT_CONFIG as CONFIG
from sensor_msgs.msg import JointState
import tf
import tf_conversions.posemath as pm
from pykdl_utils.kdl_parser import kdl_tree_from_urdf_model
from pykdl_utils.kdl_kinematics import KDLKinematics
from urdf_parser_py.urdf import URDF
base_link = CONFIG['base_link']
end_link = CONFIG['end_link']
def goto(kdl_kin, pub, listener, trans, rot):
try:
T = pm.fromTf((trans, rot))
q0 = [-1.0719114121799995, -1.1008140645600006, 1.7366724169200003,
-0.8972388608399999, 1.25538042294, -0.028902652380000227,]
# DEFAULT
objt, objr = ((0.470635159016, 0.0047549889423, -0.428045094013),(0,0,0,1))
T_orig = pm.fromTf((objt,objr))
# MOVEd
objt, objr = ((0.52, 0.00, -0.43),(0,0,0,1))
T_new = pm.fromTf((objt,objr))
T_pose = pm.toMatrix(T)
Q = kdl_kin.inverse(T_pose, q0)
print "----------------------------"
print "[ORIG] Closest joints =", Q
msg = JointState(name=CONFIG['joints'], position=Q)
pub.publish(msg)
rospy.sleep(0.2)
T_goal = T_orig.Inverse() * T
T2 = T_new * T_goal
T2_pose = pm.toMatrix(T2)
Q = kdl_kin.inverse(T2_pose, q0)
print "[NEW] Closest joints =", Q
msg = JointState(name=CONFIG['joints'], position=Q)
pub.publish(msg)
rospy.sleep(0.2)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException), e:
pass
if __name__ == '__main__':
rospy.init_node('tom_simple_goto')
pub = rospy.Publisher('joint_states_cmd', JointState, queue_size=1000)
robot = URDF.from_parameter_server()
tree = kdl_tree_from_urdf_model(robot)
chain = tree.getChain(base_link, end_link)
kdl_kin = KDLKinematics(robot, base_link, end_link)
"""
position:
x: 0.648891402264
y: -0.563835865845
z: -0.263676911067
orientation:
x: -0.399888401484
y: 0.916082302699
z: -0.0071291983402
w: -0.0288384391252
"""
trans, rot = (0.64, -0.56, -0.26), (-0.4, 0.92, -0.01, -0.03)
rate = rospy.Rate(30)
listener = tf.TransformListener()
try:
while not rospy.is_shutdown():
goto(kdl_kin, pub, listener, trans, rot)
rate.sleep()
except rospy.ROSInterruptException, e:
pass
|
import sys
import pyvisa
import visa
from PyQt4 import QtGui, QtCore
from cmw500auto import Ui_CMW500AutomationTool
from cellPowerManager import CellPowerTest
class AttenuationManager(QtGui.QMainWindow):
def __init__(self):
QtGui.QWidget.__init__(self)
self.uiAM = Ui_CMW500AutomationTool()
self.uiAM.setupUi(self)
self.uiAM.labelDisconect.setText("<font color='red'>Disconnect</font>")
# self.uiAM.pushButtonStart.setEnabled(True)
# Signals and slots
self.uiAM.pushButtonConnect.clicked.connect(self.test_connection_to_cmw)
self.uiAM.pushButtonStart.clicked.connect(self.attenuation_test)
def test_connection_to_cmw(self):
okno, self.connect_address, ok = Connection_Test.cmw_connect()
okno = okno.split(',')
if ok:
if okno[0] == 'Rohde&Schwarz' and okno[1] == 'CMW':
self.uiAM.labelDisconect.setText("<font color='green'>Connected</font>")
self.uiAM.pushButtonStart.setEnabled(True)
QtGui.QMessageBox.information(self, 'Information',
'Connection to CMW-500 has been established correctly.\n\n'
+ okno[0] + ', ' + okno[1] + ',\n'
+ 'Serial no.: ' + okno[2] + ',\n'
+ 'Soft: ' + okno[3])
def attenuation_test(self):
self.connect_address='TCPIP0::192.168.100.1::hislip0::INSTR'
if self.uiAM.checkBoxRF1.isChecked() and self.uiAM.checkBoxRF2.isChecked():
self.cell_power = CellPowerTest(connect_address=self.connect_address,
cell_test_loop=self.uiAM.lineEditLoop.text(),
cell_1_power_min=self.uiAM.lineEditRF1min.text(),
cell_1_power_max=self.uiAM.lineEditRF1max.text(),
cell_1_step=self.uiAM.lineEditRF1Step.text(),
cell_1_time=self.uiAM.lineEditRF1Time.text(),
cell_2_power_min=self.uiAM.lineEditRF2min.text(),
cell_2_power_max=self.uiAM.lineEditRF2max.text(),
cell_2_step=self.uiAM.lineEditRF2Step.text(),
cell_2_time=self.uiAM.lineEditRF2Time.text(),
channel_to_test='0')
self.cell_power.start()
elif self.uiAM.checkBoxRF1.isChecked():
self.cell_power = CellPowerTest(connect_address=self.connect_address,
cell_1_power_min=self.uiAM.lineEditRF1min.text(),
cell_1_power_max=self.uiAM.lineEditRF1max.text(),
cell_1_step=self.uiAM.lineEditRF1Step.text(),
cell_1_time=self.uiAM.lineEditRF1Time.text(),
cell_test_loop=self.uiAM.lineEditLoop.text(),
channel_to_test='1')
self.cell_power.start()
elif self.uiAM.checkBoxRF2.isChecked():
self.cell_power = CellPowerTest(connect_address=self.connect_address,
cell_2_power_min=self.uiAM.lineEditRF2min.text(),
cell_2_power_max=self.uiAM.lineEditRF2max.text(),
cell_2_step=self.uiAM.lineEditRF2Step.text(),
cell_2_time=self.uiAM.lineEditRF2Time.text(),
cell_test_loop=self.uiAM.lineEditLoop.text(),
channel_to_test='2')
self.cell_power.start()
else:
QtGui.QMessageBox.warning(self, 'Warning', 'Select any checkBox under channels if you want to start test')
class Connection_Test(QtGui.QDialog):
def __init__(self, parent=None):
super(Connection_Test, self).__init__(parent)
        # Rohde & Schwarz resources
self.rm = visa.ResourceManager()
# Window structure
resourceLabel = QtGui.QLabel('Resource')
self.resourceWindow = QtGui.QTextEdit()
self.connectWindow = QtGui.QLineEdit()
self.connectWindow.setText('TCPIP0::192.168.100.1::hislip0::INSTR')
self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
ukladV = QtGui.QHBoxLayout()
# ukladV.addWidget(self.connectButton)
ukladV.addWidget(self.buttons)
uklad = QtGui.QGridLayout(self)
uklad.addWidget(resourceLabel, 0, 0)
uklad.addWidget(self.resourceWindow, 1, 0)
uklad.addWidget(self.connectWindow, 2, 0)
uklad.addLayout(ukladV, 3, 0, 3, 0)
# self.connectButton.clicked.connect(self.start_test_connection)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.setWindowTitle('Connection Manager')
self.setModal(True)
def find_resources(self):
resource = self.rm.list_resources()
# self.resourceWindow.clear()
for item in list(resource):
self.resourceWindow.append(item)
def start_test_connection(self):
try:
instr = self.rm.open_resource(self.connectWindow.text())
return (instr.query("*IDN?"), self.connectWindow.text())
except pyvisa.errors.VisaIOError as e:
QtGui.QMessageBox.warning(self, 'Error!!!', 'Error during connection to CMW-500:\n' + str(e),
QtGui.QMessageBox.Ok)
            return ('Error during connection to CMW-500:\n' + str(e), self.connectWindow.text())
@staticmethod
def cmw_connect(parent=None):
okno = Connection_Test(parent)
okno.show()
okno.find_resources()
ok = okno.exec_()
result, connect_addres = okno.start_test_connection()
return (result, connect_addres, ok == QtGui.QDialog.Accepted)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
myapp = AttenuationManager()
myapp.show()
    sys.exit(app.exec_())
|
from cauldron import environ
from cauldron.session import projects
from cauldron.session.caching import SharedCache
class StepTestRunResult:
"""
This class contains information returned from running a step during testing.
"""
def __init__(
self,
step: 'projects.ProjectStep',
response: 'environ.Response'
):
self._step = step # type: projects.ProjectStep
self._response = response # type: environ.Response
self._locals = SharedCache().put(**step.test_locals)
@property
def local(self) -> SharedCache:
"""
Container object that holds all of the local variables that were
defined within the run step
"""
return self._locals
@property
def success(self) -> bool:
"""
Whether or not the step was successfully run. This value will be
        False if there was an uncaught exception during the execution of the
step.
"""
return not self._response.failed
def echo_error(self) -> str:
"""
        Creates a string representation of the exception that caused the step
to fail if an exception occurred. Otherwise, an empty string is returned.
:return:
The string representation of the exception that caused the running
step to fail or a blank string if no exception occurred
"""
if not self._response.errors:
return ''
return '{}'.format(self._response.errors[0].serialize()) |
from typing import Dict
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env import BaseEnv
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.policy import Policy
def get_callback():
class Callbacks(DefaultCallbacks):
def on_episode_start(
self,
worker: RolloutWorker,
base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode,
**kwargs
):
pass
def on_episode_end(
self,
worker: RolloutWorker,
base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode,
**kwargs
):
pass
def on_train_result(self, trainer, result: dict, **kwargs):
pass
return Callbacks
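# Hypothetical usage: RLlib expects the callback *class* (not an instance) in
# the trainer config, so the return value is passed along roughly as
#   config = {"callbacks": get_callback()}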
|
# Copyright (C) 2015 Cisco, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author(s): Julian Edwards
"""Helper functions and classes for tests."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
'factory',
'OS_OBJECT_PREFIX',
]
try:
from itertools import imap
except ImportError:
# Python 3
imap = map
from itertools import (
islice,
repeat,
)
import random
import string
# Prefix used when creating Openstack objects.
OS_OBJECT_PREFIX = 'testiny-'
class Factory:
"""Class that defines helpers that make things for you."""
random_letters = imap(
random.choice, repeat(string.ascii_letters + string.digits))
def make_string(self, prefix="", size=10):
return prefix + "".join(islice(self.random_letters, size))
def make_obj_name(self, obj_type=""):
"""Create a random name for an Openstack object.
        This will use a common prefix so that all the OpenStack objects
        created by a testiny run can be quickly identified.
:param obj_type: Type of the created object. This will be
included in the name as a convenience to quickly identify
the type of an object based on its name.
"""
prefix = OS_OBJECT_PREFIX
if obj_type != "":
prefix = "%s%s-" % (prefix, obj_type)
return self.make_string(prefix=prefix)
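    # e.g. factory.make_obj_name("network") yields something like
    # "testiny-network-Ab3dE6gH1k" (the 10-character suffix is random).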
# Factory is a singleton.
factory = Factory()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
from ironic.conf import auth
opts = [
cfg.StrOpt('auth_strategy',
choices=[('noauth', _('no authentication')),
('keystone', _('use the Identity service for '
'authentication')),
('http_basic', _('HTTP basic authentication'))],
help=_('Authentication strategy used by JSON RPC. Defaults to '
'the global auth_strategy setting.')),
cfg.StrOpt('http_basic_auth_user_file',
default='/etc/ironic/htpasswd-json-rpc',
help=_('Path to Apache format user authentication file used '
'when auth_strategy=http_basic')),
cfg.HostAddressOpt('host_ip',
default='::',
help=_('The IP address or hostname on which JSON RPC '
'will listen.')),
cfg.PortOpt('port',
default=8089,
help=_('The port to use for JSON RPC')),
cfg.BoolOpt('use_ssl',
default=False,
help=_('Whether to use TLS for JSON RPC')),
cfg.StrOpt('http_basic_username',
deprecated_for_removal=True,
deprecated_reason=_("Use username instead"),
help=_("Name of the user to use for HTTP Basic authentication "
"client requests.")),
cfg.StrOpt('http_basic_password',
deprecated_for_removal=True,
deprecated_reason=_("Use password instead"),
secret=True,
help=_("Password to use for HTTP Basic authentication "
"client requests.")),
]
def register_opts(conf):
conf.register_opts(opts, group='json_rpc')
auth.register_auth_opts(conf, 'json_rpc')
conf.set_default('timeout', 120, group='json_rpc')
def list_opts():
return opts + auth.add_auth_opts([])
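# Illustrative ironic.conf fragment covered by the options above (values are
# examples; most match the defaults declared here):
#   [json_rpc]
#   auth_strategy = http_basic
#   http_basic_auth_user_file = /etc/ironic/htpasswd-json-rpc
#   host_ip = ::
#   port = 8089
#   use_ssl = False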
|
"""
Formatting of UML model elements into text tests.
"""
import pytest
import gaphor.UML.uml2 as UML
from gaphor.services.eventmanager import EventManager
from gaphor.UML import model
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.umlfmt import format
from gaphor.UML.umllex import parse
@pytest.fixture
def factory():
event_manager = EventManager()
return ElementFactory(event_manager)
def add_tag_is_foo_metadata_field(e, factory):
s = factory.create(UML.Stereotype)
s.ownedAttribute = factory.create(UML.Property)
parse(s.ownedAttribute[0], "tag: str")
instance_spec = model.apply_stereotype(e, s)
slot = model.add_slot(instance_spec, s.ownedAttribute[0])
slot.value = "foo"
return slot
@pytest.mark.parametrize(
"text,formatted_text",
[
("", ""),
("not an attribute===foobar", "+ not an attribute===foobar"),
("myattr", "+ myattr"),
("myattr:int", "+ myattr: int"),
("- myattr:int[3]", "- myattr: int[3]"),
("- myattr:int[0..1]", "- myattr: int[0..1]"),
("/myattr:int", "+ /myattr: int"),
("myattr:int=3", "+ myattr: int = 3"),
],
)
def test_attribute(factory, text, formatted_text):
"""Test simple attribute formatting
"""
a = factory.create(UML.Property)
parse(a, text)
assert formatted_text == format(a)
def test_attribute_with_applied_stereotype(factory):
a = factory.create(UML.Property)
parse(a, "myattr: int")
add_tag_is_foo_metadata_field(a, factory)
assert '+ myattr: int { tag = "foo" }' == format(a, tags=True)
@pytest.mark.parametrize(
"text,name_part, mult_part",
[
("", "", ""),
("not an association end[3]", "+ not an association end[3]", ""),
("myattr", "+ myattr", ""),
("myattr[0..1]", "+ myattr", "0..1"),
("- myattr[0..1]", "- myattr", "0..1"),
],
)
def test_association_end(factory, text, name_part, mult_part):
"""Test simple attribute formatting
"""
a = factory.create(UML.Property)
a.association = factory.create(UML.Association)
parse(a, text)
assert (name_part, mult_part) == format(a)
def test_association_end_with_applied_stereotype(factory):
a = factory.create(UML.Property)
a.association = factory.create(UML.Association)
parse(a, "myattr[1]")
add_tag_is_foo_metadata_field(a, factory)
assert ("+ myattr", '1 { tag = "foo" }') == format(a)
@pytest.mark.parametrize(
"text,formatted_text",
[
("", ""),
("not an operation", "+ not an operation()"),
("+ myoper(param: str): int", "+ myoper(in param: str): int"),
("+ myoper(param: str = 'aap')", "+ myoper(in param: str = 'aap')"),
("- myoper(out param: str): int[2]", "- myoper(out param: str): int[2]"),
("- myoper(out param: str): int[0..3]", "- myoper(out param: str): int[0..3]"),
("- myoper(p1: str[2], p2:int[*])", "- myoper(in p1: str[2], in p2: int[*])"),
(
"- myoper(p1: str[2], p2:int[1..*])",
"- myoper(in p1: str[2], in p2: int[1..*])",
),
("+ (param: str): int", "+ (param: str): int"),
],
)
def test_operation(factory, text, formatted_text):
"""Test simple operation formatting
"""
o = factory.create(UML.Operation)
parse(o, text)
assert formatted_text == format(o)
def test_slot(factory):
a = factory.create(UML.Property)
parse(a, "myattr: int")
slot = add_tag_is_foo_metadata_field(a, factory)
assert 'tag = "foo"' == format(slot)
|
# -*- coding: utf-8 -*-
"""Tests for `mq_api`."""
import os
import sys
import unittest
from unittest.mock import patch
from modules.mq_api import (
run_mq_command,
check_not_empty_list,
add_annotation)
sys.path.append(os.getcwd())
def mock_execute_command(command):
"""Mock for `execute_command` function."""
return command
class TestRunMqCommand(unittest.TestCase):
@patch('modules.mq_api.execute_command', side_effect=mock_execute_command)
def test_run_mq_command(self, execute_command):
"""Tests for `run_mq_command` function."""
self.assertEqual(
run_mq_command(task='get_mq_managers'),
'dspmq')
self.assertEqual(
run_mq_command(task='get_mq_manager_status', mqm='TEST'),
'dspmq -m TEST -o all')
self.assertEqual(
run_mq_command(
task='get_listener',
mqm='TEST',
listener='LISTENER'),
'echo "display listener(LISTENER)"| runmqsc TEST')
class TestListFunctions(unittest.TestCase):
def test_check_not_empty_list(self):
"""Tests for `check_not_empty_list` function."""
self.assertEqual(check_not_empty_list(lis1=list()), 0)
self.assertEqual(check_not_empty_list(lis1=['test']), 1)
def test_add_annotation(self):
"""Tests for `add_annotation` function."""
self.assertEqual(add_annotation(lis1=list(), annotation='test'), [])
self.assertEqual(add_annotation(lis1=['test'], annotation='Good'), ['Good', 'test'])
if __name__ == '__main__':
unittest.main()
|
import os
import glob
import pathlib
import itertools
from functools import wraps
import inspect
from parse import parse as parse_
from pformat import *
__all__ = ['Paths', 'Path', 'tree', 'UnderspecifiedError']
def tree(root='.', paths=None, data=None):
'''Build paths from a directory spec.
Arguments:
root (str): the root directory.
paths (dict): the directory structure.
Returns:
The initialized Paths object
'''
if not isinstance(root, str):
root, paths = '.', root
paths = paths or {}
if isinstance(paths, (list, tuple, set)):
paths = {k: k for k in paths}
return Paths(
{v: Path(*k) for k, v in get_keys({'{root}': {'': 'root', **paths}})},
dict(data or {}, root=root))
def parse(pattern, s):
r = parse_(pattern, s)
return r and r.named
class UnderspecifiedError(KeyError):
pass
class Paths(object):
'''
Example
-------
paths = Paths.define('./blah/logs', {
'{log_id}': {
'model.h5': 'model',
'model_spec.pkl': 'model_spec',
'plots': {
'{step_name}': {
'{plot_name}.png': 'plot',
'': 'plot_dir'
}
}
}
})
paths = paths.format(root='logs', log_id='adfasdfasdf', step_name='epoch_100')
paths.model # logs/adfasdfasdf/model.h5
paths.plot # logs/adfasdfasdf/plots/epoch_100/{plot_name}.png
plot_files = glob.glob(paths['plot'].format(plot_name='*'))
'''
_paths = None
def __init__(self, paths, data=None):
self._paths = paths
self.data = {} if data is None else data
for path in self._paths.values():
path.parent = self
    @classmethod
    @wraps(tree)
    def define(cls, *a, **kw):
return tree(*a, **kw)
@property
def paths(self):
return self._paths
@property
def copy(self):
return Paths({name: path.copy for name, path in self.paths.items()},
dict(self.data))
def add(self, root, paths):
'''Build paths from a directory spec.
Arguments:
root (str): the root directory.
paths (dict): the directory structure.
Returns:
The initialized Paths object
'''
root = (root if isinstance(root, Path) else self.paths[root]).path_pattern
paths = paths if isinstance(paths, Paths) else tree(paths)
        # add the sub-tree's paths, re-rooted under the given root
self.paths.update(**{
k: p.repath(p.format_only(root=root))
for k, p in paths.paths.items()})
for path in paths.paths.values():
path.parent = self
return self
def __repr__(self):
return '<Paths data={} \n{}\n>'.format(self.data, '\n'.join([
'\t{} : {}'.format(name, self[name].maybe_format())
for name in self.paths
]))
def __contains__(self, path):
return path in self.paths
def __iter__(self):
return iter(self.paths)
def __getitem__(self, name):
return self._paths[name]
def __getattr__(self, name):
if name != '_paths' and self._paths and name in self._paths:
return self._paths[name]
raise AttributeError(name)
def parse(self, path, name):
'''Parse data from a formatted string (reverse of string format)
Arguments:
path (str): the string to parse
name (str): the name of the path pattern to use.
'''
return self[name].parse(path)
def translate(self, file, form, to, **kw):
return self[to].specify(**self[form].parse(file, **kw))
def makedirs(self):
'''Instantiate all fully specified directories.'''
for path in self.paths.values():
try:
path.make(up=1)
except UnderspecifiedError:
pass
def update(self, **kw):
'''Update format data in place.'''
return self.specify(inplace=True, **kw)
def specify(self, *, inplace=False, **kw):
'''Return a new Paths object with added variables for each pattern.'''
p = self if inplace else self.copy
p.data.update(kw)
return p
def unspecify(self, *keys, inplace=False):
'''Remove keys from paths dictionary'''
p = self if inplace else self.copy
for key in keys:
p.data.pop(key, None)
return p
@property
def fully_specified(self):
'''Are all paths fully specified?'''
return all(p.fully_specified for p in self.paths.values())
def format(self, **kw):
'''Return a dictionary where all fully specified paths are converted to strings
and underspecified strings are left as Path objects.
Arguments:
**kw: additional data specified for formatting.
'''
return {name: self[name].maybe_format(**kw) for name in self}
def partial_format(self, **kw):
'''Return a dictionary where all paths are converted to strings
and underspecified fields are left in for later formatting.
Arguments:
**kw: additional data specified for formatting.
'''
return {name: self[name].partial_format(**kw) for name in self}
def globs(self, *names):
return [f for name in names for f in self[name].glob()]
class Path(os.PathLike):
'''
# define a path with missing parts
    path = Path('blah/{something}/{huh}/what')
# update data in place
path.update(huh='um')
# not all fields are defined
assert not path.fully_specified
# partial format will fill available fields
assert str(path) == path.partial_format() == 'blah/{something}/um/what'
# format will throw an error because there are missing keys
try:
path.format()
assert False
except KeyError:
assert True
# glob_pattern will fill missing keys with an asterisk
assert path.glob_pattern == 'blah/*/um/what'
    # glob() will use the glob pattern and return all matching files
    assert isinstance(path.glob(), list)
'''
__FORBIDDEN_KEYS__ = ()
def __init__(self, *path, data=None, parent=None):
self._path = pathlib.Path(*path)
self.data = {} if data is None else data
self.parent = parent
def __str__(self):
'''The path as a string (partially formatted)'''
return self.partial_format()
def __fspath__(self):
return self.format()
def __repr__(self):
return '<Path "{}" data={}>'.format(self.path_pattern, self.path_data)
# def __getattr__(self, name):
# return getattr(self.path, name)
def __contains__(self, substr):
return substr in self.partial_format()
def __truediv__(self, path):
return self.join(path)
def __add__(self, obj):
if isinstance(obj, str):
return self.repath(self.path_pattern + obj)
if isinstance(obj, dict):
return self.specify(**obj)
def __lshift__(self, n):
return self.up(n)
'''
Path Forms
'''
@property
def path_pattern(self):
'''The path as an unformatted string'''
return str(self._path)
@property
def path(self):
'''Get the formatted path as a pathlib.Path object'''
return pathlib.Path(self.format())
@property
def s(self):
'''Convert to string (partial_format)'''
return str(self)
@property
def f(self):
'''Convert to string (format)'''
return self.format()
'''
Data Manipulation
'''
@property
def path_data(self):
'''Both the path specific data and the paths group data'''
return {**self.parent.data, **self.data} if self.parent else self.data
def update(self, **kw):
'''Update specified data in place'''
self.data.update(**{k: v for k, v in kw.items() if k not in self.__FORBIDDEN_KEYS__})
return self
def specify(self, **kw):
'''Update specified data and return a new object'''
return self.copy.update(**kw)
def unspecify(self, *keys, parent=True):
'''Remove keys from path dictionary'''
p = self.copy
if parent and p.parent:
p.parent = p.parent.unspecify(*keys)
for key in keys:
p.data.pop(key, None)
return p
@property
def fully_specified(self):
'''Check if the path is fully specified.'''
try:
self.format()
return True
except KeyError:
return False
@property
def unspecified(self):
'''Get a path without any attached data.'''
return Path(self._path)
'''
Path Manipulation
'''
@property
def safe(self):
'''Make sure the path does not go above root.'''
return self.repath(os.path.normpath(os.sep + str(self._path)).lstrip(os.sep))
def repath(self, *f, data=None):
'''Make a copy with an entirely new path.'''
return Path(*f, data=dict(self.data, **(data or {})), parent=self.parent)
def join(self, *f):
'''Make a copy and append directories to the end.'''
return self.repath(self._path, *f)
def assign_name(self, name):
        '''Register this path under a new name in the parent Paths object.'''
if self.parent:
self.parent.paths[name] = self
@property
def copy(self):
'''Create a copy of the path object.'''
return self.join()
def up(self, n=1):
'''Create a copy of the path object up one directory.'''
return self.repath(
os.path.normpath(os.path.join(self._path, *(['..']*n))))
def find_sibling(self, name):
'''Find another path in the root tree.'''
try:
return self.parent[name]
except (AttributeError, TypeError) as e:
raise AttributeError('No related paths are available.')
except KeyError as e:
raise KeyError('No related paths by that name are available.')
'''
Path Methods
'''
def exists(self):
return self.path.exists()
def is_file(self):
return self.path.is_file()
def is_dir(self):
return self.path.is_dir()
    def read_text(self, *a, **kw):
        return self.path.read_text(*a, **kw)
    def write_text(self, *a, **kw):
        return self.path.write_text(*a, **kw)
    def read_bytes(self, *a, **kw):
        return self.path.read_bytes(*a, **kw)
    def write_bytes(self, *a, **kw):
        return self.path.write_bytes(*a, **kw)
    def rmdir(self, *a, **kw):
        return self.path.rmdir(*a, **kw)
'''
Format
'''
def format(self, **kw):
'''Insert data into the path string. (Works like string format.)
Raises:
KeyError if the format string is underspecified.
'''
try:
return self.path_pattern.format(**{**self.path_data, **kw})
except KeyError as e:
raise UnderspecifiedError(str(e))
def parse(self, path, use_data=True):
'''Extract variables from a compiled path'''
pattern = self.partial_format() if use_data else self.path_pattern
data = parse(pattern, path)
if not data:
raise ValueError(inspect.cleandoc('''
Could not parse path using pattern.
path:{}
pattern:{}
`path.parse(path)` will call self.partial_format() by default before parsing
so any specified keys will be fixed. This is helpful to dodge ambiguous parsing
cases. To disable this pass `use_data=False` to parse.
'''.format(path, pattern)))
return {**self.path_data, **data}
def translate(self, path, to, **kw):
'''Translate the paths to another pattern'''
return self.find_sibling(to).specify(**self.parse(path, **kw))
def maybe_format(self, **kw):
'''Try to format a field. If it fails, return as a Path object.'''
p = self.specify(**kw)
try:
return p.format()
except KeyError:
return p
def partial_format(self, **kw):
'''Format a field, leaving all unspecified fields to be filled later.'''
f = pformat(self.path_pattern, **{**self.path_data, **kw})
return f
def format_only(self, **kw):
return pformat(self.path_pattern, **kw)
'''
Glob / File Patterns
'''
@property
def glob_pattern(self):
'''Format a field, setting all unspecified fields as a wildcard (asterisk).'''
return gformat(self.path_pattern, **self.path_data)
def glob(self, *f):
'''Find all matching files. unspecified fields are set as a wildcard (asterisk).'''
return sglob(self.glob_pattern, *f)
def iglob(self, *f):
'''Find all matching files as a generator.'''
return glob.iglob(os.path.join(self.glob_pattern, *f))
def rglob(self, *f, include=None):
'''Find all matching files recursively as a generator.'''
# if the path isn't an existing dir, assume it's a glob pattern
include = not self.is_dir() if include is None else include
fs = self.path.rglob(os.path.join(*(f or '*')))
return itertools.chain((
pathlib.Path(f) for f in self.glob()), fs) if include else fs
def next_unique(self, i=1, suffix='_{:02}'):
        '''Get the next filename that doesn't exist.
        e.g. if 'results/run.txt' already exists, Path('results/run.txt').next_unique()
        returns 'results/run_01.txt' (or the next free index).
'''
f = self.format()
f_pattern = '{}{{}}{}'.format(*os.path.splitext(f))
sfx = suffix if callable(suffix) else suffix.format
while os.path.exists(f):
f, i = f_pattern.format(sfx(i)), i + 1
return f
def prefix(self, prefix='{prefix}_'):
return self.up().join('{}{}'.format(
prefix, os.path.basename(self.path_pattern)))
def suffix(self, suffix='_{suffix}'):
froot, ext = os.path.splitext(self.path_pattern)
return self.repath('{}{}{}'.format(froot, suffix, ext))
'''
Read / Write / Create / Remove
'''
def make(self, up=0):
'''Create this (or up a) directory.'''
os.makedirs(self.up(up), exist_ok=True)
return self
def touch(self, *a, **kw):
'''Touch this file - will recursively create parent directories.'''
self.make(up=1).path.touch(*a, **kw)
return self
def rm(self):
'''Remove this file or directory.'''
p = self.safe
p.rmdir() if self.is_dir() else os.remove(p.format()) if self.is_file() else None
return self
def rmglob(self, *f, include=None):
'''Recursively remove files matching join(*f). Set include=True, to
remove this node as well.'''
fs = list(sorted(self.safe.rglob(*f, include=include), key=lambda p: p.parts, reverse=True))
for fi in fs:
fi.rmdir() if fi.is_dir() else os.remove(fi)
return self
def write(self, x, mode='', **kw):
'''Write to file. Set mode='b' to write as bytes.'''
self.make(1)
b = 'b' in mode if mode else isinstance(x, (bytes, bytearray))
self.write_bytes(x, **kw) if b else self.write_text(str(x), **kw)
return self
def read(self, mode='', **kw):
'''Read file. Set mode='b' to read as bytes.'''
return self.read_bytes(**kw) if 'b' in mode else self.read_text(**kw)
def open(self, mode='r', *a, makedir=True, **kw):
if makedir and any(m in mode for m in ('wa' if makedir is True else makedir)):
self.up().make()
return self.path.open(mode, *a, **kw)
def move(self, f_new):
'''Move the file to a new name.'''
os.rename(self.format(), f_new)
return self.repath(f_new)
def sglob(*f):
'''Enhanced glob. Pass path parts and return sorted list of files.'''
return sorted(glob.glob(os.path.join(*f)))
def fbase(f, up=0):
'''Return the file basename up x directories.'''
return os.path.basename(os.path.abspath(os.path.join(f, *(['..']*up))))
def backup(path, mode='index'):
    path = Path(path) if not isinstance(path, Path) else path
if path.exists():
if mode == 'index':
bkp_path = path.next_unique(1)
elif mode == 'bkp':
bkp_path = path + '.bkp'
        else:
bkp_path = path + '~'
os.rename(path, bkp_path)
print('moved existing file', path, 'to', bkp_path)
def get_keys(data, keys=None, iters_as_keys=False):
'''Recursively traverse a nested dict and return the trail of keys, and the final value'''
keys = tuple(keys or ())
for key, value in data.items():
keys_ = keys + (key,)
if isinstance(value, dict):
for keys_, val in get_keys(value, keys_, iters_as_keys):
yield keys_, val
elif iters_as_keys and isinstance(value, (tuple, list, set)):
for val in value:
yield keys_, val
else:
yield keys_, value
example = tree({
'data': {
'{date}': {
'': 'date',
'{labels_set}.csv': 'csv',
'flac': {
'': 'flac_root',
'{name}.flac': 'flac',
}
}
}
})
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: src/training_game.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='src/training_game.proto',
package='hexit',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x17src/training_game.proto\x12\x05hexit\"\x9e\x02\n\x0cTrainingGame\x12\x37\n\rmoveSnapshots\x18\x01 \x03(\x0b\x32 .hexit.TrainingGame.MoveSnapshot\x1a\xac\x01\n\x0cMoveSnapshot\x12!\n\x15normalizedVisitCounts\x18\x01 \x03(\x02\x42\x02\x10\x01\x12*\n\x06winner\x18\x02 \x01(\x0e\x32\x1a.hexit.TrainingGame.Player\x12#\n\x17squaresOccupiedByMyself\x18\x03 \x03(\x02\x42\x02\x10\x01\x12(\n\x1csquaresOccupiedByOtherPlayer\x18\x04 \x03(\x02\x42\x02\x10\x01\"&\n\x06Player\x12\n\n\x06MYSELF\x10\x00\x12\x10\n\x0cOTHER_PLAYER\x10\x01\x62\x06proto3')
)
_TRAININGGAME_PLAYER = _descriptor.EnumDescriptor(
name='Player',
full_name='hexit.TrainingGame.Player',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MYSELF', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER_PLAYER', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=283,
serialized_end=321,
)
_sym_db.RegisterEnumDescriptor(_TRAININGGAME_PLAYER)
_TRAININGGAME_MOVESNAPSHOT = _descriptor.Descriptor(
name='MoveSnapshot',
full_name='hexit.TrainingGame.MoveSnapshot',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='normalizedVisitCounts', full_name='hexit.TrainingGame.MoveSnapshot.normalizedVisitCounts', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='winner', full_name='hexit.TrainingGame.MoveSnapshot.winner', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='squaresOccupiedByMyself', full_name='hexit.TrainingGame.MoveSnapshot.squaresOccupiedByMyself', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='squaresOccupiedByOtherPlayer', full_name='hexit.TrainingGame.MoveSnapshot.squaresOccupiedByOtherPlayer', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=281,
)
_TRAININGGAME = _descriptor.Descriptor(
name='TrainingGame',
full_name='hexit.TrainingGame',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='moveSnapshots', full_name='hexit.TrainingGame.moveSnapshots', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRAININGGAME_MOVESNAPSHOT, ],
enum_types=[
_TRAININGGAME_PLAYER,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=35,
serialized_end=321,
)
_TRAININGGAME_MOVESNAPSHOT.fields_by_name['winner'].enum_type = _TRAININGGAME_PLAYER
_TRAININGGAME_MOVESNAPSHOT.containing_type = _TRAININGGAME
_TRAININGGAME.fields_by_name['moveSnapshots'].message_type = _TRAININGGAME_MOVESNAPSHOT
_TRAININGGAME_PLAYER.containing_type = _TRAININGGAME
DESCRIPTOR.message_types_by_name['TrainingGame'] = _TRAININGGAME
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainingGame = _reflection.GeneratedProtocolMessageType('TrainingGame', (_message.Message,), dict(
MoveSnapshot = _reflection.GeneratedProtocolMessageType('MoveSnapshot', (_message.Message,), dict(
DESCRIPTOR = _TRAININGGAME_MOVESNAPSHOT,
__module__ = 'src.training_game_pb2'
# @@protoc_insertion_point(class_scope:hexit.TrainingGame.MoveSnapshot)
))
,
DESCRIPTOR = _TRAININGGAME,
__module__ = 'src.training_game_pb2'
# @@protoc_insertion_point(class_scope:hexit.TrainingGame)
))
_sym_db.RegisterMessage(TrainingGame)
_sym_db.RegisterMessage(TrainingGame.MoveSnapshot)
_TRAININGGAME_MOVESNAPSHOT.fields_by_name['normalizedVisitCounts']._options = None
_TRAININGGAME_MOVESNAPSHOT.fields_by_name['squaresOccupiedByMyself']._options = None
_TRAININGGAME_MOVESNAPSHOT.fields_by_name['squaresOccupiedByOtherPlayer']._options = None
# @@protoc_insertion_point(module_scope)
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Bootstraps the resources required to run the SageMaker integration tests.
"""
import boto3
import logging
import time
from common.aws import get_aws_account_id, get_aws_region, duplicate_s3_contents
from common.resources import random_suffix_name
from mq.bootstrap_resources import (
TestBootstrapResources,
VPC_CIDR_BLOCK,
VPC_SUBNET_CIDR_BLOCK,
)
def create_vpc() -> str:
region = get_aws_region()
ec2 = boto3.client("ec2", region_name=region)
logging.debug(f"Creating VPC with CIDR {VPC_CIDR_BLOCK}")
resp = ec2.create_vpc(
CidrBlock=VPC_CIDR_BLOCK,
)
vpc_id = resp['Vpc']['VpcId']
# TODO(jaypipes): Put a proper waiter here...
time.sleep(3)
vpcs = ec2.describe_vpcs(VpcIds=[vpc_id])
if len(vpcs['Vpcs']) != 1:
raise RuntimeError(
f"failed to describe VPC we just created '{vpc_id}'",
)
vpc = vpcs['Vpcs'][0]
vpc_state = vpc['State']
if vpc_state != "available":
raise RuntimeError(
f"VPC we just created '{vpc_id}' is not available. current state: {vpc_state}",
)
logging.info(f"Created VPC {vpc_id}")
return vpc_id
def create_subnet(vpc_id: str) -> str:
region = get_aws_region()
ec2 = boto3.client("ec2", region_name=region)
resp = ec2.create_subnet(
CidrBlock=VPC_SUBNET_CIDR_BLOCK,
VpcId=vpc_id,
)
subnet_id = resp['Subnet']['SubnetId']
# TODO(jaypipes): Put a proper waiter here...
time.sleep(3)
subnets = ec2.describe_subnets(SubnetIds=[subnet_id])
if len(subnets['Subnets']) != 1:
raise RuntimeError(
f"failed to describe subnet we just created '{subnet_id}'",
)
subnet = subnets['Subnets'][0]
subnet_state = subnet['State']
if subnet_state != "available":
raise RuntimeError(
f"Subnet we just created '{subnet_id}' is not available. current state: {subnet_state}",
)
logging.info(f"Created VPC Subnet {subnet_id}")
return subnet_id
def service_bootstrap() -> dict:
logging.getLogger().setLevel(logging.INFO)
vpc_id = create_vpc()
subnet_id = create_subnet(vpc_id)
return TestBootstrapResources(
vpc_id,
subnet_id,
).__dict__
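# The integration test harness normally invokes service_bootstrap(); a manual
# call would look roughly like:
#   resources = service_bootstrap()
#   logging.info("bootstrapped resources: %s", resources)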
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# copyright Julien TREBOSC 2012-2013
# calculates (S-S0)/S0 of a 2D dataset
# dataset must be series of 1D spectra alternated S/S0
# if a point of S0 spectrum is below the defined threshold
# then set S0 point to threshold if S0 below threshold
from __future__ import division
import sys
import brukerIO
import numpy as n
import argparse
parser = argparse.ArgumentParser(description='Calculate (S0-S)/S0 spectrum from 1D REDOR or RESPDOR with interleaved acquisition')
parser.add_argument('inputs', help='Full path of the dataset to process')
parser.add_argument('--threshold', '-t', required=True, help='threshold signal below which S0 is not significant')
parser.add_argument('--order', '-o', required=False, default='0', help='Order in 2D 0: S0-S / 1: S-S0')
args = parser.parse_args()
infile = args.inputs
threshold = float(args.threshold)
order = args.order
dat =brukerIO.dataset(brukerIO.splitprocpath(infile))
spect = dat.readspect1d()
# print("spectrum shape is ",spect.shape)
(si2,) = spect.shape
# print("si2=%d" % (si2,))
spect = spect.reshape(si2//2, 2)
if order == '0':
S0 = spect[:, 0]
S = spect[:, 1]
elif order == '1':
S = spect[:, 0]
S0 = spect[:, 1]
else: raise ValueError("order should be 0 (S0-S) or 1 (S-S0)")
for i in range(si2//2):
if S0[i] < threshold :
S0[i] = threshold*10
S[i] = threshold*10
Frac = (S0-S)/S0
# print "S=",S
# print "S0=",S0
# print "Frac=",Frac
dat.writespect1dri(Frac, Frac)
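# Example invocation (script and dataset paths are illustrative):
#   python calc_fraction.py /data/nmr/sample/1/pdata/1 -t 1e5 -o 0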
|
from functools import partial
import numpy as np
import pytest
import torch
from nnrl.utils import convert_to_tensor
from raylab.envs import get_env_creator
DECELERATION_ZONES = (
{"center": [[0.0, 0.0]], "decay": [2.0]},
{"center": [[5.0, 4.5], [1.5, 3.0]], "decay": [1.15, 1.2]},
)
@pytest.fixture(params=DECELERATION_ZONES)
def env_config(request):
return request.param
@pytest.fixture
def env_creator():
return get_env_creator("Navigation")
@pytest.fixture
def env(env_creator, env_config):
return env_creator(env_config)
@pytest.fixture(params=(1, 4))
def n_batch(request):
return request.param
@pytest.fixture(params=((), (1,), (2,)))
def sample_shape(request):
return request.param
def test_reward_fn(env):
obs = env.reset()
act = env.action_space.sample()
_obs, rew, _, _ = env.step(act)
obs_t, act_t, _obs_t = map(
partial(convert_to_tensor, device="cpu"), (obs, act, _obs)
)
rew_t = env.reward_fn(obs_t, act_t, _obs_t)
assert np.allclose(rew, rew_t.numpy())
def test_transition_fn_fidelity(env):
obs = env.reset()
act = env.action_space.sample()
torch.manual_seed(42)
_obs, _, _, _ = env.step(act)
obs_t, act_t = map(partial(convert_to_tensor, device="cpu"), (obs, act))
obs_t, act_t = map(lambda x: x.requires_grad_(True), (obs_t, act_t))
torch.manual_seed(42)
_obs_t, logp = env.transition_fn(obs_t, act_t)
assert _obs_t.grad_fn is not None
assert _obs_t.detach().numpy() in env.observation_space
assert np.allclose(_obs, _obs_t.detach().numpy())
assert logp.shape == ()
assert logp.dtype == torch.float32
assert logp.grad_fn is not None
def test_transition_fn_sampling(env, n_batch, sample_shape):
obs = [env.reset() for _ in range(n_batch)]
act = [env.action_space.sample() for _ in range(n_batch)]
obs_t, act_t = map(partial(convert_to_tensor, device="cpu"), (obs, act))
obs_t, act_t = map(lambda x: x.requires_grad_(True), (obs_t, act_t))
_obs_t, logp = env.transition_fn(obs_t, act_t, sample_shape)
assert _obs_t.shape == sample_shape + obs_t.shape
assert _obs_t.grad_fn is not None
assert logp.shape == sample_shape + (n_batch,)
assert logp.dtype == torch.float32
assert logp.grad_fn is not None
|
# coding: utf-8
# Copyright 2020 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the ibm_security_advisor_notifications_api_sdk service API operations
"""
import pytest
import unittest
import datetime
# import json
# import os
import ibm_cloud_security_advisor.notifications_api_v1
from ibm_cloud_security_advisor.notifications_api_v1 import *
from ibm_cloud_security_advisor import NotificationsApiV1
from ibm_cloud_sdk_core import BaseService
from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime
from unittest.mock import patch
from unittest import mock
m = mock.Mock()
class TestChannelResponseDefinition(unittest.TestCase):
app = {}
@classmethod
def setup_class(cls):
print("\nrunning setup preparation...")
channelResponseDefinitionSeverity = ChannelResponseDefinitionSeverity(
high=True, medium=True, low=True
)
channelResponseDefinitionAlertSourceItem= ChannelResponseDefinitionAlertSourceItem(
provider_name="abc", finding_types=['abc']
)
TestChannelResponseDefinition.app = ChannelResponseDefinition(
channel_id="abc", name="abc", description="abc",
type="abc", severity=channelResponseDefinitionSeverity,
endpoint="http://abc.com", enabled=True,
alert_source=[channelResponseDefinitionAlertSourceItem],
frequency="abc"
)
# read env vars
#envvars = read_credentials()
"""_from_dict test cases """
@patch.object(ChannelResponseDefinitionSeverity, '_from_dict')
@patch.object(ChannelResponseDefinitionAlertSourceItem, '_from_dict')
def test_from_dict_bad_key_neg(self, mock1, mock2):
self.assertRaises(
ValueError, ChannelResponseDefinition._from_dict, {"bad_key": "abc"})
def test_from_dict_success(self):
res = ChannelResponseDefinition._from_dict({
"channel_id":"abc", "name":"abc", "description":"abc",
"type": "abc", "severity": {"high":True, "medium":True, "low":True},
"endpoint":"http://abc.com", "enabled":True,
"alert_source":[],
"frequency":"abc"
})
print(res)
"""_to_dict test cases """
def test_to_dict_success(self):
TestChannelResponseDefinition.app.to_dict()
"""__eq__ test cases """
def test__eq__isinstance(self):
TestChannelResponseDefinition.app.__eq__(TestChannelResponseDefinition.app)
def test__eq__not_isinstance(self):
TestChannelResponseDefinition.app.__eq__({})
"""__ne__ test cases """
def test__ne__isinstance(self):
TestChannelResponseDefinition.app.__ne__(TestChannelResponseDefinition.app)
|
# coding=utf-8
from flask import (render_template, flash, redirect, url_for, request,
current_app)
from flask_login import login_user, logout_user, login_required
from ..models import User
from . import auth
from .forms import LoginForm
@auth.route('/login', methods=['GET', 'POST'])
def login():
unsecured_live = all((
not current_app.config['DEBUG'], not current_app.config['TESTING'],
not request.is_secure))
    if unsecured_live:
return redirect(url_for('.login', _external=True, _scheme='https'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.verify_password(form.password.data):
flash('Invalid email or password.', 'danger')
return redirect(url_for('.login'))
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('talks.index'))
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('talks.index')) |
""" A package with several SQL related helpers. """
|
from mrjob.job import MRJob
from mrjob.step import MRStep
import time
# Couldn't use class objects as intermediate values of the mapper and reducer steps since they are not serializable.
# Using a namedtuple didn't work either, since it gets converted to a list by default for serializability (reason assumed).
# class day_temp_stats():
# def __init__(self,min_temp,max_temp,count,date = None):
# self.min_temp = min_temp
# self.max_temp = max_temp
# self.count = count
# self.date = date
class MinMaxCount(MRJob):
def __init__(self, *args, **kwargs):
super(MinMaxCount, self).__init__(*args, **kwargs)
def steps(self,args = None):
return [
MRStep(mapper=self.map_temp,
combiner = self.combine_local_min_max_count_temp,
reducer=self.reduce_global_min_max_count_temp),
MRStep(reducer=self.sorting_reducer)
]
def map_temp(self, _, line):
(city,temp,timestamp) = line.split('|')
#output (city,date), temperature
yield (city,self.get_day_from_timestamp(timestamp)), int(temp)
def combine_local_min_max_count_temp(self, city_day, data):
minx = 10000
maxx = -10000
count = 0
for temperature in data:
minx = min(temperature,minx)
maxx = max(temperature,maxx)
count+=1
if minx<10000 and maxx>-10000:
#output (city,date),(min_temperature,max_temperature,count)
yield city_day,(minx,maxx,count)
def reduce_global_min_max_count_temp(self, city_day, data):
minx = 10000
maxx = -10000
count = 0
for temperature in data:
minx = min(temperature[0],minx)
maxx = max(temperature[1],maxx)
count+=temperature[2]
if minx<10000 and maxx>-10000:
#yield city,(date,(min_temperature,max_temperature,count))
yield city_day[0],(city_day[1],(minx,maxx,count))
def sorting_reducer(self, city, day_stats):
day_stats = list(day_stats) #safe to do until date range is assumed to be small
day_stats = sorted(day_stats, key=lambda day_stats: day_stats[0])
for data in day_stats:
date = self.convert_epoch(data[0])
            #output city, date, min_temp, max_temp, count
            print(city, date, data[1][0], data[1][1], data[1][2])
###################################################################################
## utility functions
def get_day_from_timestamp(self,timestamp):
one_day = 60*60*24
return int((int(timestamp)//one_day)*one_day)
def convert_epoch(self,epoch):
return time.strftime('%Y-%m-%d', time.localtime(epoch))
if __name__ == '__main__':
MinMaxCount.run()
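# Usage sketch (the input file name is an assumption). Each input line is
# expected to look like 'city|temperature|unix_timestamp', matching the
# split('|') in map_temp above, e.g.:
#   berlin|21|1404691200
# The job can then be run locally with:
#   python min_max_count.py temperatures.txt
# and emits one line per city and day with the min/max temperature and count.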
|
#!/usr/bin/env python
# Copyright 2014, Rob Lyon <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup
import os
import sys
lib_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
if lib_path not in sys.path:
sys.path.insert(0, lib_path)
from qhost.constants import VERSION
setup(
name='qhost',
version=VERSION,
author='Rob Lyon <[email protected]>',
url='http://rlyon.me',
packages=['qhost'],
scripts=['bin/qhost']
)
|
from keyring.testing.backend import BackendBasicTests
from sagecipher.keyring import Keyring
class TestKeyring(BackendBasicTests):
def init_keyring(self):
return Keyring()
|
#!/usr/bin/python
# @marekq
# www.marek.rocks
import base64, botocore, boto3, csv, feedparser
import gzip, json, os, re, readability, requests
import queue, sys, threading, time
from aws_lambda_powertools import Logger, Tracer
from boto3.dynamodb.conditions import Key, Attr
from datetime import date, datetime, timedelta
from bs4 import BeautifulSoup
modules_to_be_patched = ["boto3", "requests"]
tracer = Tracer(patch_modules = modules_to_be_patched)
logger = Logger()
# establish a session with SES, DynamoDB and Comprehend
ddb = boto3.resource('dynamodb', region_name = os.environ['dynamo_region'], config = botocore.client.Config(max_pool_connections = 50)).Table(os.environ['dynamo_table'])
com = boto3.client(service_name = 'comprehend', region_name = os.environ['AWS_REGION'])
ses = boto3.client('ses')
s3 = boto3.client('s3')
# get the RSS feed through feedparser
@tracer.capture_method(capture_response = False)
def get_rss(url):
return feedparser.parse(url)
# write the blogpost record into DynamoDB
@tracer.capture_method(capture_response = False)
def put_dynamo(timest_post, title, cleantxt, rawhtml, description, link, blogsource, author, guid, tags, category, datestr_post):
# if no description was submitted, put a dummy value to prevent issues parsing the output
if len(description) == 0:
description = '...'
# put the record into dynamodb
ddb.put_item(
TableName = os.environ['dynamo_table'],
Item = {
'timest' : timest_post, # store the unix timestamp of the post
'datestr' : datestr_post, # store the human friendly timestamp of the post
'title' : title,
'description' : description, # store the short rss feed description of the content
'fulltxt': cleantxt, # store the "clean" text of the blogpost, using \n as a line delimiter
'rawhtml': rawhtml, # store the raw html output of the readability plugin, in order to include the blog content with text markup
'link' : link,
'blogsource' : blogsource,
'author' : author,
'tag' : tags,
'lower-tag' : tags.lower(), # convert the tags to lowercase, which makes it easier to search or match these
'guid' : guid, # store the blogpost guid as a unique key
'category' : category,
'visible' : 'y' # set the blogpost to visible by default - this "hack" allows for a simple query on a static primary key
})
# retrieve the url of a blogpost
@tracer.capture_method(capture_response = False)
def retrieve_url(url):
# set a "real" user agent
firefox = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:79.0) Gecko/20100101 Firefox/79.0"
    # retrieve the main text section from the url using the readability module and the Firefox user agent defined above
req = requests.get(url, headers = {'User-Agent' : firefox})
doc = readability.Document(req.text)
rawhtml = doc.summary(html_partial = True)
# remove any html tags from output
soup = BeautifulSoup(rawhtml, 'html.parser')
cleantext = soup.get_text().strip('\n').encode('utf-8')
return str(rawhtml), str(cleantext)
# analyze the text of a blogpost using the AWS Comprehend service
@tracer.capture_method(capture_response = False)
def comprehend(cleantxt, title):
detections = []
found = False
fulltext = title + " " + cleantxt
# cut down the text to less than 5000 bytes as this is the file limit for Comprehend
strlen = sys.getsizeof(fulltext)
while strlen > 5000:
fulltext = fulltext[:-1]
strlen = sys.getsizeof(fulltext)
# check whether organization or title labels were found by Comprehend
for x in com.detect_entities(Text = fulltext, LanguageCode = 'en')['Entities']:
if x['Type'] == 'ORGANIZATION' or x['Type'] == 'TITLE' or x['Type'] == 'COMMERCIAL_ITEM' or x['Type'] == 'PERSON':
if x['Text'] not in detections:
detections.append(x['Text'])
found = True
# if no tags were retrieved, add a default tag
if found:
tags = ', '.join(detections)
else:
tags = 'none'
# return tag values
return(tags)
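# Hedged side note (not part of the original flow): sys.getsizeof() above
# measures the size of the Python string object rather than the encoded text,
# so a byte-accurate way to respect the 5000-byte limit mentioned in the
# comment would be to truncate the UTF-8 encoding directly, as sketched here.
def truncate_to_utf8_bytes(text, max_bytes = 5000):
    encoded = text.encode('utf-8')[:max_bytes]
    # drop a possibly half-cut multibyte character at the end
    return encoded.decode('utf-8', errors = 'ignore')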
# send an email out whenever a new blogpost was found - this feature is optional
@tracer.capture_method(capture_response = False)
def send_email(recpt, title, blogsource, author, rawhtml, link, datestr_post):
# create a simple html body for the email
mailmsg = '<html><body><br><i>Posted by '+str(author)+' in ' +str(blogsource) + ' blog on ' + str(datestr_post) + '</i><br><br>'
mailmsg += '<a href="' + link + '">view post here</a><br><br>' + str(rawhtml) + '<br></body></html>'
# send the email using SES
r = ses.send_email(
Source = os.environ['fromemail'],
Destination = {'ToAddresses': [recpt]},
Message = {
'Subject': {
'Data': blogsource.upper() + ' - ' + title
},
'Body': {
'Html': {
'Data': mailmsg
}
}
}
)
print('sent email with subject ' + blogsource.upper() + ' - ' + title + ' to ' + recpt)
# main function to kick off collection of an rss feed
@tracer.capture_method(capture_response = False)
def get_feed(url, blogsource, guids):
# create a variable about blog update and list to store new blogs
blogupdate = False
newblogs = []
# get the rss feed
rssfeed = get_rss(url)
print('found ' + str(len(rssfeed['entries'])) + ' blog entries')
# check all the retrieved articles for published dates
for x in rssfeed['entries']:
# retrieve post guid
guid = str(x['guid'])
timest_post = int(time.mktime(x['updated_parsed']))
timest_now = int(time.time())
# retrieve blog date and description text
datestr_post = time.strftime('%d-%m-%Y %H:%M', x['updated_parsed'])
# if the post guid is not found in dynamodb and newer than the specified amount of days, retrieve the record
if guid not in guids and (timest_now < (timest_post + (86400 * days_to_retrieve))):
# retrieve other blog post values, remove double quotes from title
link = str(x['link'])
title = str(x['title']).replace('"', "'")
# retrieve the blogpost author if available
author = 'blank'
if x.has_key('author'):
author = str(x['author'])
# retrieve blogpost link
print('retrieving '+str(title)+' in '+str(blogsource)+' using url '+str(link)+'\n')
rawhtml, cleantxt = retrieve_url(link)
# DISABLED COMPREHEND TEMPORARILY - discover tags with comprehend on html output
#tags = comprehend(cleantxt, title)
tags = ''
# clean up blog post description text and remove unwanted characters such as double quotes and spaces (this can be improved further)
des = str(x['description'])
r = re.compile(r'<[^>]+>')
description = r.sub('', str(des)).strip(' ').replace('"', "'").strip('\n')
# submit the retrieved tag values discovered by comprehend to the list
category_tmp = []
category = 'none'
# join category fields in one string
if x.has_key('tags'):
for tag in x['tags']:
category_tmp.append(str(tag['term']))
category = str(', '.join(category_tmp))
# update the blogpost
blogupdate = True
# put record to dynamodb
put_dynamo(str(timest_post), title, cleantxt, rawhtml, description, link, blogsource, author, guid, tags, category, datestr_post)
# add blog to newblogs list
newblogs.append(str(blogsource) + ' ' + str(title) + ' ' + str(guid))
# if sendemails enabled, generate the email message body for ses and send email
if send_mail == 'y':
                # get mail title and email recipient
mailt = blogsource.upper()+' - '+title
recpt = os.environ['toemail']
# send the email
send_email(recpt, title, blogsource, author, rawhtml, link, datestr_post)
return blogupdate, newblogs
# check if new items were uploaded to s3
@tracer.capture_method(capture_response = False)
def get_s3_json_age():
# set variable for s3 update operation
updateblog = False
# list objects in s3
s3list = s3.list_objects_v2(Bucket = os.environ['s3bucket'])
print('get s3 list ' + str(s3list))
# iterate over present files in s3
if 'Contents' in s3list:
for s3file in s3list['Contents']:
# get last modified time of item
s3time = s3file['LastModified']
objtime = int(time.mktime(s3time.timetuple()))
nowtime = int(time.time())
difftime = nowtime - objtime
# if an s3 file was created in the last 300 seconds, update the blog feed
if difftime < 300:
updateblog = True
print(str(difftime) + " " + str(s3file['Key']))
# return true/false about blog update status
return updateblog
# get the contents of the dynamodb table for json object on S3
@tracer.capture_method(capture_response = False)
def get_table_json(blogsource):
# create a list for found guids from json stored on s3
s3guids = []
# create a list for s3 objects that were found
s3files = []
# check if the s3 object exists by listing current s3 objects
s3list = s3.list_objects_v2(Bucket = os.environ['s3bucket'])
# set days_to_get value
days_to_get = int(days_to_retrieve)
# iterate over present files in s3
if 'Contents' in s3list:
for x in s3list['Contents']:
s3files.append(x['Key'])
# if the blog json is available on s3
if str(blogsource + '.json') in s3files:
# retrieve the object from s3
s3obj = s3.get_object(Bucket = os.environ['s3bucket'], Key = blogsource + '.json')
# create list for results from json
res = json.loads(s3obj['Body'].read())
# add guids from json file to s3guids list
for s3file in res:
s3guids.append(s3file['guid'])
# if the blog json does not exist on s3
else:
        # since the previous results can not be found, create an empty list for results
res = []
print('could not find ' + blogsource + '.json file on s3')
# get the current timestamp
now_ts = datetime.now()
# get timestamp based on days_to_retrieve
old_ts = now_ts - timedelta(days = days_to_get)
diff_ts = int(time.mktime(old_ts.timetuple()))
if blogsource != 'all':
        # query the dynamodb table for blogposts of a specific category from up to 'days_to_retrieve' days ago
blogs = ddb.query(ScanIndexForward = True, ProjectionExpression = 'blogsource, datestr, timest, title, author, description, link, guid', KeyConditionExpression = Key('blogsource').eq(blogsource) & Key('timest').gt(str(diff_ts)))
else:
        # query the dynamodb table for blogposts of all categories from up to 'days_to_retrieve' days ago
blogs = ddb.query(ScanIndexForward = True, IndexName = 'timest', ProjectionExpression = 'blogsource, datestr, timest, title, author, description, link, guid', KeyConditionExpression = Key('visible').eq('y') & Key('timest').gt(str(diff_ts)))
# iterate over the returned items
for a in blogs['Items']:
# if guid not present in s3 json file
if a['guid'] not in s3guids:
b = {'timest': a['timest'], 'blogsource': a['blogsource'], 'title': a['title'], 'datestr': a['datestr'], 'guid': a['guid'], 'author': a['author'], 'link': a['link'], 'description': a['description'].strip(), 'author': a['author']}
# add the json object to the result list
res.append(b)
# retrieve additional items if lastevaluatedkey was found
while 'LastEvaluatedKey' in blogs:
lastkey = blogs['LastEvaluatedKey']
if blogsource != 'all':
# query the dynamodb table for blogposts of a specific category
            blogs = ddb.query(ScanIndexForward = True, ExclusiveStartKey = lastkey, ProjectionExpression = 'blogsource, datestr, timest, title, author, description, link, guid', KeyConditionExpression = Key('blogsource').eq(blogsource) & Key('timest').gt(str(diff_ts)))
else:
            # query the dynamodb table for blogposts of all categories from up to 'days_to_retrieve' days ago
blogs = ddb.query(ScanIndexForward = True, ExclusiveStartKey = lastkey, IndexName = 'timest', ProjectionExpression = 'blogsource, datestr, timest, title, author, description, link, guid', KeyConditionExpression = Key('visible').eq('y') & Key('timest').gt(str(diff_ts)))
# add an entry per blog to the output list
for a in blogs['Items']:
# if guid not present in s3 json file
if a['guid'] not in s3guids:
b = {'timest': a['timest'], 'blogsource': a['blogsource'], 'title': a['title'], 'datestr': a['datestr'], 'guid': a['guid'], 'author': a['author'], 'link': a['link'], 'description': a['description'].strip(), 'author': a['author']}
# add the json object to the result list
res.append(b)
return res
# copy the file to s3 with a public acl
@tracer.capture_method(capture_response = False)
def cp_s3(blogsource):
# put object to s3
s3.put_object(
Bucket = os.environ['s3bucket'],
Body = open('/tmp/' + blogsource + '.json', 'rb'),
Key = blogsource + '.json',
ACL = 'public-read',
CacheControl = 'public',
ContentType = 'application/json'
)
# update json objects on S3 for single page web apps
@tracer.capture_method(capture_response = False)
def update_json_s3(blog):
print('updating json for ' + blog)
# get the json content from DynamoDB
out = get_table_json(blog)
# create the json and return path
make_json(out, blog)
# upload the json to s3
cp_s3(blog)
# create a json file from blog content
def make_json(content, blogsource):
# write the json file to /tmp/
fpath = '/tmp/' + blogsource + '.json'
# create empty list for filteredcontent
filteredcontent = []
# filter blog posts for category
for blog in content:
if blog['blogsource'] == blogsource or blogsource == 'all':
filteredcontent.append(blog)
# sort the keys by timestamp
dumpfile = sorted(filteredcontent, key = lambda k: k['timest'], reverse = True)
with open(fpath, "w") as outfile:
json.dump(dumpfile, outfile)
print('wrote to ' + fpath)
# lambda handler
@logger.inject_lambda_context(log_event = True)
@tracer.capture_lambda_handler
def handler(event, context):
# set default value for 'days_to_retrieve'
global days_to_retrieve
days_to_retrieve = int(1)
# set send email boolean, newblog and blogupdate default values
global send_mail
send_mail = ''
newblogs = ''
blogupdate = False
# if updating all blogposts, set source to 'all' and skip blogpost retrieval
if event['msg'] == 'all':
blogsource = 'all'
        # check if there are files on s3 less than 300 seconds old
blogupdate = get_s3_json_age()
else:
# get submitted values from blog to retrieve
url = event['msg']['url']
blogsource = event['msg']['blogsource']
guids = event['guids']
days_to_retrieve = int(event['msg']['daystoretrieve'])
send_mail = event['sendemail']
# get feed and boolean indicating if an update to s3 is required
blogupdate, newblogs = get_feed(url, blogsource, guids)
# if new blogposts found, create new json output on s3
if blogupdate == True:
print('updating json output on s3 for ' + blogsource)
update_json_s3(blogsource)
return newblogs
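# Illustrative event shapes accepted by handler() above. The field names are
# taken from the reads in the handler; the concrete values are assumptions.
#   {"msg": "all"}
#       -> only rebuild the per-blog JSON files on S3 if they changed recently
#   {"msg": {"url": "https://aws.amazon.com/blogs/aws/feed/",
#            "blogsource": "aws",
#            "daystoretrieve": "1"},
#    "guids": [],
#    "sendemail": "n"}
#       -> crawl a single feed, store new posts in DynamoDB and optionally email them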
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A source for reading VCF file headers."""
from __future__ import absolute_import
from collections import OrderedDict
from functools import partial
from typing import Dict, Iterable, Optional, Union  # pylint: disable=unused-import
import vcf
import apache_beam as beam
from apache_beam.io import filebasedsource
from apache_beam.io import range_trackers # pylint: disable=unused-import
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.iobase import Read
from apache_beam.transforms import PTransform
from gcp_variant_transforms.beam_io import vcfio
class VcfHeaderFieldTypeConstants(object):
"""Constants for types from VCF header."""
FLOAT = 'Float'
INTEGER = 'Integer'
STRING = 'String'
FLAG = 'Flag'
CHARACTER = 'Character'
class VcfParserHeaderKeyConstants(object):
"""Constants for header fields from the parser (currently PyVCF)."""
ID = 'id'
NUM = 'num'
TYPE = 'type'
DESC = 'desc'
SOURCE = 'source'
VERSION = 'version'
LENGTH = 'length'
class VcfHeader(object):
"""Container for header data."""
def __init__(self,
infos=None, # type: Dict[str, OrderedDict[vcf.parser._Info]]
filters=None, # type: Dict[str, OrderedDict[vcf.parser._Filter]]
alts=None, # type: Dict[str, OrderedDict[vcf.parser._Alt]]
formats=None, # type: Dict[str, OrderedDict[vcf.parser._Format]]
contigs=None, # type: Dict[str, OrderedDict[vcf.parser._Contig]]
file_name=None # type: str
):
# type: (...) -> None
"""Initializes a VcfHeader object.
It keeps the order of values in the input dictionaries. Order is important
in some fields like `contigs` and for ensuring order is unchanged in
VCF->VCF pipelines.
Args:
infos: A dictionary mapping info keys to vcf info metadata values.
filters: A dictionary mapping filter keys to vcf filter metadata values.
alts: A dictionary mapping alt keys to vcf alt metadata values.
formats: A dictionary mapping format keys to vcf format metadata values.
contigs: A dictionary mapping contig keys to vcf contig metadata values.
file_name: The file name of the vcf file.
"""
self.infos = self._values_asdict(infos or {})
self.filters = self._values_asdict(filters or {})
self.alts = self._values_asdict(alts or {})
self.formats = self._values_asdict(formats or {})
self.contigs = self._values_asdict(contigs or {})
self.file_name = file_name
def __eq__(self, other):
return (self.infos == other.infos and
self.filters == other.filters and
self.alts == other.alts and
self.formats == other.formats and
self.contigs == other.contigs)
def __repr__(self):
return ', '.join([str(header) for header in [self.infos,
self.filters,
self.alts,
self.formats,
self.contigs]])
def _values_asdict(self, header):
"""Converts PyVCF header values to ordered dictionaries."""
ordered_dict = OrderedDict()
for key in header:
# These methods were not designed to be protected. They start with an
# underscore to avoid conflicts with field names. For more info, see
# https://docs.python.org/2/library/collections.html#collections.namedtuple
ordered_dict[key] = header[key]._asdict() # pylint: disable=W0212
return ordered_dict
class VcfHeaderSource(filebasedsource.FileBasedSource):
"""A source for reading VCF file headers.
Parses VCF files (version 4) using PyVCF library.
"""
def __init__(self,
file_pattern,
compression_type=CompressionTypes.AUTO,
validate=True):
# type: (str, str, bool) -> None
super(VcfHeaderSource, self).__init__(file_pattern,
compression_type=compression_type,
validate=validate,
splittable=False)
self._compression_type = compression_type
def read_records(
self,
file_name, # type: str
unused_range_tracker # type: range_trackers.UnsplittableRangeTracker
):
# type: (...) -> Iterable[VcfHeader]
try:
vcf_reader = vcf.Reader(fsock=self._read_headers(file_name))
except StopIteration:
raise ValueError('{} has no header.'.format(file_name))
yield VcfHeader(infos=vcf_reader.infos,
filters=vcf_reader.filters,
alts=vcf_reader.alts,
formats=vcf_reader.formats,
contigs=vcf_reader.contigs,
file_name=file_name)
def _read_headers(self, file_name):
with FileSystems.open(
file_name, compression_type=self._compression_type) as file_to_read:
while True:
record = file_to_read.readline()
if record and record.startswith('#'):
yield record
else:
break
class ReadVcfHeaders(PTransform):
"""A PTransform for reading the header lines of VCF files.
Parses VCF files (version 4) using PyVCF library.
"""
def __init__(
self,
file_pattern, # type: str
compression_type=CompressionTypes.AUTO, # type: str
validate=True, # type: bool
**kwargs # type: **str
):
# type: (...) -> None
"""Initialize the :class:`ReadVcfHeaders` transform.
Args:
file_pattern: The file path to read from either as a single file or a glob
pattern.
compression_type: Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
validate: Flag to verify that the files exist during the pipeline creation
time.
"""
super(ReadVcfHeaders, self).__init__(**kwargs)
self._source = VcfHeaderSource(
file_pattern, compression_type, validate=validate)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
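# A minimal usage sketch for the transform above (the file pattern and output
# path below are assumptions, not taken from this module):
#
#   with beam.Pipeline() as p:
#     headers = p | 'ReadHeaders' >> ReadVcfHeaders('gs://my-bucket/*.vcf')
#     _ = headers | 'WriteHeaders' >> WriteVcfHeaders('gs://my-bucket/header.vcf')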
def _create_vcf_header_source(file_pattern=None, compression_type=None):
return VcfHeaderSource(file_pattern=file_pattern,
compression_type=compression_type)
class ReadAllVcfHeaders(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading the
header lines of :class:`~apache_beam.pvalue.PCollection` of VCF files.
  Reads a :class:`~apache_beam.pvalue.PCollection` of VCF files or file patterns
  and produces a PCollection of :class:`VcfHeader` objects.
  This transform should be used when reading from a massive number (>70,000) of
  files.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
**kwargs):
# type: (int, str, **str) -> None
"""Initialize the :class:`ReadAllVcfHeaders` transform.
Args:
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type: Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
"""
super(ReadAllVcfHeaders, self).__init__(**kwargs)
source_from_file = partial(
_create_vcf_header_source, compression_type=compression_type)
self._read_all_files = filebasedsource.ReadAllFiles(
False, # splittable (we are just reading the headers)
CompressionTypes.AUTO, desired_bundle_size,
0, # min_bundle_size
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
class HeaderTypeConstants(object):
INFO = 'INFO'
FILTER = 'FILTER'
ALT = 'ALT'
FORMAT = 'FORMAT'
CONTIG = 'contig'
class _HeaderFieldKeyConstants(object):
ID = 'ID'
NUMBER = 'Number'
TYPE = 'Type'
DESCRIPTION = 'Description'
SOURCE = 'Source'
VERSION = 'Version'
LENGTH = 'length'
class _WriteVcfHeaderFn(beam.DoFn):
"""A DoFn for writing VCF headers to a file."""
HEADER_TEMPLATE = '##{}=<{}>\n'
FINAL_HEADER_LINE = '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT\n'
def __init__(self, file_path):
# type: (str) -> None
self._file_path = file_path
self._file_to_write = None
def process(self, header):
# type: (VcfHeader) -> None
with FileSystems.create(self._file_path) as self._file_to_write:
self._write_headers_by_type(HeaderTypeConstants.INFO, header.infos)
self._write_headers_by_type(HeaderTypeConstants.FILTER, header.filters)
self._write_headers_by_type(HeaderTypeConstants.ALT, header.alts)
self._write_headers_by_type(HeaderTypeConstants.FORMAT, header.formats)
self._write_headers_by_type(HeaderTypeConstants.CONTIG, header.contigs)
self._file_to_write.write(self.FINAL_HEADER_LINE)
def _write_headers_by_type(self, header_type, headers):
# type: (str, Dict[str, Dict[str, Union[str, int]]]) -> None
"""Writes all VCF headers of a specific type.
Args:
header_type: The type of `headers` (e.g. INFO, FORMAT, etc.).
headers: Each value of headers is a dictionary that describes a single VCF
header line.
"""
for header in headers.values():
self._file_to_write.write(
self._to_vcf_header_line(header_type, header))
def _to_vcf_header_line(self, header_type, header):
# type: (str, Dict[str, Union[str, int]]) -> str
"""Formats a single VCF header line.
Args:
header_type: The VCF type of `header` (e.g. INFO, FORMAT, etc.).
header: A dictionary mapping header field keys (e.g. id, desc, etc.) to
their corresponding values for the header line.
Returns:
A formatted VCF header line.
"""
formatted_header_values = self._format_header(header)
return self.HEADER_TEMPLATE.format(header_type, formatted_header_values)
def _format_header(self, header):
# type: (Dict[str, Union[str, int]]) -> str
"""Formats all key, value pairs that describe the header line.
Args:
header: A dictionary mapping header field keys (e.g. id, desc, etc.) to
their corresponding values for the header line.
Returns:
A formatted string composed of header keys and values.
"""
formatted_values = []
for key, value in header.iteritems():
if self._should_include_key_value(key, value):
formatted_values.append(self._format_header_key_value(key, value))
return ','.join(formatted_values)
def _should_include_key_value(self, key, value):
return value is not None or (key != 'source' and key != 'version')
def _format_header_key_value(self, key, value):
# type: (str, Union[str, int]) -> str
"""Formats a single key, value pair in a header line.
Args:
key: The key of the header field (e.g. num, desc, etc.).
value: The header value corresponding to the key in a specific
header line.
Returns:
A formatted key, value pair for a VCF header line.
"""
key = self._format_header_key(key)
if value is None:
value = vcfio.MISSING_FIELD_VALUE
elif key == _HeaderFieldKeyConstants.NUMBER:
value = self._format_number(value)
elif (key == _HeaderFieldKeyConstants.DESCRIPTION
or key == _HeaderFieldKeyConstants.SOURCE
or key == _HeaderFieldKeyConstants.VERSION):
value = self._format_string_value(value)
return '{}={}'.format(key, value)
def _format_header_key(self, key):
if key == VcfParserHeaderKeyConstants.ID:
return _HeaderFieldKeyConstants.ID
elif key == VcfParserHeaderKeyConstants.NUM:
return _HeaderFieldKeyConstants.NUMBER
elif key == VcfParserHeaderKeyConstants.DESC:
return _HeaderFieldKeyConstants.DESCRIPTION
elif key == VcfParserHeaderKeyConstants.TYPE:
return _HeaderFieldKeyConstants.TYPE
elif key == VcfParserHeaderKeyConstants.SOURCE:
return _HeaderFieldKeyConstants.SOURCE
elif key == VcfParserHeaderKeyConstants.VERSION:
return _HeaderFieldKeyConstants.VERSION
elif key == VcfParserHeaderKeyConstants.LENGTH:
return _HeaderFieldKeyConstants.LENGTH
else:
raise ValueError('Invalid VCF header key {}.'.format(key))
def _format_number(self, number):
# type: (int) -> Optional[str]
"""Returns the string representation of field_count from PyVCF.
PyVCF converts field counts to an integer with some predefined constants
as specified in the vcf.parser.field_counts dict (e.g. 'A' is -1). This
method converts them back to their string representation to avoid having
direct dependency on the arbitrary PyVCF constants.
Args:
number: An integer representing the number of fields in INFO as specified
by PyVCF.
Returns:
A string representation of field_count (e.g. '-1' becomes 'A').
Raises:
ValueError: if the number is not valid.
"""
if number is None:
return None
elif number >= 0:
return str(number)
number_to_string = {v: k for k, v in vcf.parser.field_counts.items()}
if number in number_to_string:
return number_to_string[number]
else:
raise ValueError('Invalid value for number: {}'.format(number))
def _format_string_value(self, value):
return '"{}"'.format(value)
class WriteVcfHeaders(PTransform):
"""A PTransform for writing VCF header lines."""
def __init__(self, file_path):
# type: (str) -> None
self._file_path = file_path
def expand(self, pcoll):
return pcoll | beam.ParDo(_WriteVcfHeaderFn(self._file_path))
|
from PyQt5 import QtGui
import cv2
def qimage_of_item(item_name: str) -> QtGui.QImage:
"""Get item picture"""
    # If the item has 'ENCHANTED' in the name, use the normal picture because
    # I'm too lazy to get the enchanted pictures
if 'ENCHANTED' in item_name:
item_name_split = item_name.split('_')[1:]
img_src = f"item_icons/{'_'.join(item_name_split)}.png"
else:
img_src = f"item_icons/{item_name}.png"
img = cv2.imread(img_src, cv2.IMREAD_UNCHANGED)
img = cv2.imread('item_icons/NOT_FOUND.png', cv2.IMREAD_UNCHANGED) if img is None else img
    height, width, channels = img.shape  # avoid shadowing the built-in 'bytes'
    bytes_per_line = 3 * width
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB, img)
return QtGui.QImage(img.data, width, height, bytes_per_line, QtGui.QImage.Format_RGB888) |
import pandas as pd
import plotly.graph_objects as go
print('\tStarting /home/duck/scripts/reproduce_historical_sales_data_diagram.py…')
path = '/home/duck/data'
csv_file_path = path + '/basedata/historical_sales_data_csv_format.csv'
image_path = path + '/images'
print('Reading csv file…')
df = pd.read_csv(csv_file_path, index_col=0)
year = df['Year']
months = df['Month']
fish = df['Fish']
ducks = df['Ducks']
total = df['Total']
monthList = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
indexMonths = 0
index = 0
date_data = []
for month in months:
date_data.append(monthList[indexMonths] + ' ' + str(year[index]))
index += 1
indexMonths += 1
if indexMonths == 12:
indexMonths = 0
df = df.drop(columns=['Month', 'Year'])
df['Date'] = date_data
print('Creating Figure…')
fig = go.Figure()
fig.add_trace(go.Scatter(x=df['Date'], y=ducks, line_color='red', name='Ducks'))
fig.add_trace(go.Scatter(x=df['Date'], y=fish, line_color='blue', name='Fish'))
fig.add_trace(go.Scatter(x=df['Date'], y=total, line_color='green', name='Total'))
fig.update_xaxes(title_text="Months")
fig.update_yaxes(title_text="Units sold")
print('Saving Figure…')
fig.write_image(image_path + '/hist_sales_graph.png')
print('Figure saved!')
print('\tFinished /home/duck/scripts/reproduce_historical_sales_data_diagram.py!') |
import scrapy
from scrapy import signals
from scrapy.shell import inspect_response
from ids import QUEST_IDS
from utils import Merger
from utils.formatter import Formatter
from lang_data import get_filter_list_by_lang
import re
import json
class QuestSpider(scrapy.Spider):
name = "quest_scraper"
start_urls = []
quest_data = []
lang = ""
base_url = "https://{}.classic.wowhead.com/quest={}/"
xpath_title = "//div[@class='text']/h1[@class='heading-size-1']/text()"
xpath_objective_and_description = "//div[@class='block-block-bg is-btf']//following-sibling::text()"
def __init__(self, lang="en", **kwargs):
super().__init__(**kwargs)
self.lang = lang
if lang == "mx":
self.base_url = "https://db.wowlatinoamerica.com/?quest={}"
self.start_urls = [self.base_url.format(qid) for qid in QUEST_IDS]
self.xpath_title = "//div[@class='text']/h1/text()"
self.xpath_objective_and_description = "//div[@class='text']/h1//following-sibling::text()"
else:
self.start_urls = [self.base_url.format(lang, qid) for qid in QUEST_IDS]
# self.start_urls = [self.base_url.format(lang, qid) for qid in [8]]
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(QuestSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def parse(self, response):
if "?notFound=" in response.url:
qid = response.url[response.url.index("?notFound=") + 10:]
self.logger.warning("Quest with ID '{}' could not be found.".format(qid))
return
if self.lang == "mx":
qid = response.url.split("/")[-1][7:] # It is /?quest=
else:
qid = response.url.split("/")[-2][6:]
# inspect_response(response, self)
title = self.__parse_title(response)
description, objective = self.__parse_objective_and_description(response)
minLevel = self.__parse_required_level(response)
experience = self.__parse_experience(response)
result = {
"id": int(qid),
"minLevel": minLevel,
"experience": experience,
"title": title,
"objective": objective,
"description": description
}
# self.logger.info(result)
yield result
def __parse_title(self, response) -> str:
title: str = response.xpath(self.xpath_title).get()
title = self.__filter_title(title)
return title
@staticmethod
def __filter_title(title: str) -> str:
if title.startswith("[DEPRECATED]"):
title = title[13:]
elif title.startswith("["):
return ""
elif "<NYI>" in title:
return ""
if title.startswith("« "):
title = title[1:]
if title.endswith(" »"):
title = title[:-2]
return title.strip()
def __parse_experience(self, response) -> int:
body = str(response.body)
#Archive
rest = re.search("(\d+,\d+,\d+,\d+|\d+,\d+,\d+|\d+,\d+|\d+) experience", body)
if(rest is not None):
#print(rest.group(1))
experience = rest.group(1)
return str(experience).replace(",", "")
#else:
# print("Something wong?")
#WoWhead
rest = re.search("g_quests\[\d+\], {(.*?)}", body)
if(rest is not None):
dataString = "{"+str(rest.group(1))+"}"
dataString = dataString.replace("\\", "")
questJsonData = json.loads(dataString)
#print(rest.group(1))
if "xp" in questJsonData:
experience = questJsonData["xp"]
return experience
else:
return None
return None
def __parse_required_level(self, response) -> int:
body = str(response.body)
rest = re.search("Requires level (\d+)", body)
if(rest is not None):
#print(rest.group(1))
minLevel = rest.group(1)
return minLevel
rest = re.search("Requires level: (\d+)", body)
if(rest is not None):
#print(rest.group(1))
minLevel = rest.group(1)
return minLevel
#"reqlevel":1
rest = re.search('"reqlevel":(\d+)', body)
if(rest is not None):
#print(rest.group(1))
minLevel = rest.group(1)
return minLevel
return None
def __parse_objective_and_description(self, response):
text_snippets = response.xpath(self.xpath_objective_and_description).extract()
data_list = self.__filter_text_snippets(text_snippets)
if len(data_list) < 2:
self.logger.warning("Wrong structured HTML for {}".format(response.url))
objective = ""
description = ""
else:
objective = data_list[0]
description = data_list[1]
return description, objective
def __filter_text_snippets(self, text_snippets):
data_list = []
lastTextSegment = False # This is True if the segment is the last of the current category
for t in text_snippets:
t = self.__filter_text(t)
if not t.strip(): # Segment just contains whitespaces/linebreaks
continue
if lastTextSegment or not data_list: # The previous segment was the last of a category (objective/description)
lastTextSegment = t.endswith("\n")
t = t.replace("\n", "")
data_list.append([t.strip()])
else:
lastTextSegment = t.endswith("\n")
t = t.replace("\n", "")
data_list[-1].append(t.strip()) # Append to the existing list
return list(filter(None, data_list))
def __filter_text(self, text: str) -> str:
filter_list = get_filter_list_by_lang(self.lang)
# Don't include untranslated text pieces
if self.lang != "en" and (
"You" in text or "you" in text or " the " in text or " I " in text or " to " in text or "[" in text or "]" in text):
return ""
# text = text.replace("\n", "")
for f in filter_list:
if text.startswith(f):
return ""
elif f in text:
text = text[:text.index(f)]
text = text.replace(" ", " ")
if text.endswith("\\"):
text = text[:-1]
return text
def spider_closed(self, spider):
self.logger.info("Spider closed.")
f = Formatter()
f(self.lang, "quest")
m = Merger(self.lang, "Quests")
m()
self.logger.info("New lookup file at '{}'".format(m.lang_dir))
|
# General imports
import numpy as np
from torch.nn.functional import softmax
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
from torchvision.utils import save_image
import pickle
import random
import seaborn as sns
import torch
import sys
import torchvision
from torchvision import transforms, datasets
from torch.utils.data.dataset import random_split
from sklearn.model_selection import train_test_split
from numpy.random import default_rng
# My imports
sys.path.insert(0, './')
from Third_Party.smoothing_adversarial.architectures import get_architecture
import RSCP.Score_Functions as scores
from Architectures.DenseNet import DenseNet
from Architectures.VGG import vgg19_bn, VGG
from Architectures.ResNet import ResNet
from RSCP.utils import Smooth_Adv, get_normalize_layer, NormalizeLayer
alpha = 0.4 # desired nominal marginal coverage
epsilon = 0.125 # L2 bound on the adversarial noise
n_test = 10000 # number of test points (if larger than available it takes the entire set)
train = False # whether to train a model or not
ratio = 2 # ratio between adversarial noise bound to smoothed noise
sigma_smooth = ratio * epsilon # sigma used for smoothing
sigma_model = sigma_smooth # sigma used for training the model
n_smooth = 1 # number of samples used for smoothing
My_model = False # use my model or salman/cohen models
dataset = "ImageNet" # dataset to be used 'MNIST', 'CIFAR100', 'CIFAR10', 'ImageNet'
calibration_scores = ['SC'] # score function to check 'HCC', 'SC', 'SC_Reg'
model_type = 'ResNet'
load = True
base_size=20
linesize=4
if not load:
if dataset == "ImageNet":
GPU_CAPACITY = 64
else:
GPU_CAPACITY = 1024
# calculate correction based on the Lipschitz constant
if sigma_smooth == 0:
correction = 10000
else:
correction = float(epsilon) / float(sigma_smooth)
# set random seed
seed = 0
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# load datasets
if dataset == "MNIST":
# Load train set
train_dataset = torchvision.datasets.MNIST(root='./datasets/',
train=True,
transform=torchvision.transforms.ToTensor(),
download=True)
# load test set
test_dataset = torchvision.datasets.MNIST(root='./datasets',
train=False,
transform=torchvision.transforms.ToTensor())
elif dataset == "CIFAR10":
# Load train set
train_dataset = torchvision.datasets.CIFAR10(root='./datasets/',
train=True,
transform=torchvision.transforms.ToTensor(),
download=True)
# load test set
test_dataset = torchvision.datasets.CIFAR10(root='./datasets',
train=False,
transform=torchvision.transforms.ToTensor())
elif dataset == "CIFAR100":
# Load train set
train_dataset = torchvision.datasets.CIFAR100(root='./datasets/',
train=True,
transform=torchvision.transforms.ToTensor(),
download=True)
# load test set
test_dataset = torchvision.datasets.CIFAR100(root='./datasets',
train=False,
transform=torchvision.transforms.ToTensor())
elif dataset == "ImageNet":
# get dir of imagenet validation set
imagenet_dir = "./datasets/imagenet"
        transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
test_dataset = datasets.ImageFolder(imagenet_dir, transform)
# cut the size of the test set if necessary
if n_test < len(test_dataset):
test_dataset = torch.utils.data.random_split(test_dataset, [n_test, len(test_dataset) - n_test])[0]
# save the sizes of each one of the sets
if dataset != "ImageNet":
n_train = len(train_dataset)
n_test = len(test_dataset)
# Create Data loader for test set
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=n_test,
shuffle=False)
# convert test set into tensor
examples = enumerate(test_loader)
batch_idx, (x_test, y_test) = next(examples)
# get dimension of data
rows = x_test.size()[2]
cols = x_test.size()[3]
channels = x_test.size()[1]
if dataset == "ImageNet":
num_of_classes = 1000
else:
num_of_classes = len(train_dataset.classes)
min_pixel_value = 0.0
max_pixel_value = 1.0
# automatically choose device use gpu 0 if it is available o.w. use the cpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# print the chosen device
print("device: ", device)
# loading a pre-trained model
# load my models
if My_model:
if dataset == "CIFAR10":
if model_type == 'ResNet':
model = ResNet(depth=110, num_classes=10)
state = torch.load('./checkpoints/CIFAR10_ResNet110_Robust_sigma_' + str(sigma_model) + '.pth.tar',
map_location=device)
elif model_type == 'DenseNet':
model = DenseNet(depth=100, num_classes=10, growthRate=12)
state = torch.load('./checkpoints/CIFAR10_DenseNet_sigma_' + str(sigma_model) + '.pth.tar',
map_location=device)
elif model_type == 'VGG':
model = vgg19_bn(num_classes=10)
state = torch.load('./checkpoints/CIFAR10_VGG_sigma_' + str(sigma_model) + '.pth.tar',
map_location=device)
normalize_layer = get_normalize_layer("cifar10")
model = torch.nn.Sequential(normalize_layer, model)
model.load_state_dict(state['state_dict'])
elif dataset == "CIFAR100":
if model_type == 'ResNet':
model = ResNet(depth=110, num_classes=100)
state = torch.load('./checkpoints/ResNet110_Robust_sigma_' + str(sigma_model) + '.pth.tar',
map_location=device)
elif model_type == 'DenseNet':
model = DenseNet(depth=100, num_classes=100, growthRate=12)
state = torch.load('./checkpoints/DenseNet_sigma_' + str(sigma_model) + '.pth.tar', map_location=device)
elif model_type == 'VGG':
model = vgg19_bn(num_classes=100)
state = torch.load('./checkpoints/VGG_sigma_' + str(sigma_model) + '.pth.tar', map_location=device)
normalize_layer = get_normalize_layer("cifar10")
model = torch.nn.Sequential(normalize_layer, model)
model.load_state_dict(state['state_dict'])
# load cohen and salman models
else:
# checkpoint = torch.load(
# './pretrained_models/Salman/cifar10/finetune_cifar_from_imagenetPGD2steps/PGD_10steps_30epochs_multinoise/2-multitrain/eps_64/cifar10/resnet110/noise_'+str(sigma_model)+'/checkpoint.pth.tar')
if dataset == "CIFAR10":
checkpoint = torch.load(
'./Pretrained_Models/Cohen/cifar10/resnet110/noise_' + str(sigma_model) + '/checkpoint.pth.tar',
map_location=device)
model = get_architecture(checkpoint["arch"], "cifar10")
elif dataset == "ImageNet":
checkpoint = torch.load(
'./Pretrained_Models/Cohen/imagenet/resnet50/noise_' + str(sigma_model) + '/checkpoint.pth.tar',
map_location=device)
model = get_architecture(checkpoint["arch"], "imagenet")
model.load_state_dict(checkpoint['state_dict'])
# send model to device
model.to(device)
# put model in evaluation mode
model.eval()
# create indices for the test points
indices = torch.arange(n_test)
scores_list = []
for score in calibration_scores:
if score == 'HCC':
scores_list.append(scores.class_probability_score)
if score == 'SC':
scores_list.append(scores.generalized_inverse_quantile_score)
if score == 'SC_Reg':
scores_list.append(scores.rank_regularized_score)
# generate adversarial examples
x_test_adv = torch.randn_like(x_test)
x_test_adv_base = torch.randn_like(x_test)
# Split test data into calibration and test
idx1, idx2 = train_test_split(indices, test_size=0.5)
# save sizes of calibration and test sets
n_calib = x_test[idx1].size()[0]
n_test_new = x_test[idx2].size()[0]
print(n_calib)
print(n_test_new)
scores_simple = np.zeros((len(scores_list), n_calib))
# create container for the calibration thresholds
thresholds = np.zeros((len(scores_list), 3))
# calculate maximum batch size according to gpu capacity
batch_size = GPU_CAPACITY // n_smooth
# calculate number of batches
if n_calib % batch_size != 0:
num_of_batches = (n_calib // batch_size) + 1
else:
num_of_batches = (n_calib // batch_size)
# create container for smoothed and base classifier outputs
simple_outputs = np.zeros((n_calib, num_of_classes))
# initiate random uniform variables for inverse quantile score
rng = default_rng()
uniform_variables = rng.uniform(size=n_calib, low=0.0, high=1.0)
# pass all points to model in batches and calculate scores
for j in range(num_of_batches):
# get inputs and labels of batch
inputs = x_test[idx1][(j * batch_size):((j + 1) * batch_size)]
labels = y_test[idx1][(j * batch_size):((j + 1) * batch_size)]
noise = (torch.randn_like(inputs)*sigma_smooth).to(device)
noisy_points = inputs.to(device) + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1).numpy()
# get smoothed score for each point
simple_outputs[(j * batch_size):((j + 1) * batch_size), :] = noisy_outputs
# run over all scores functions and compute scores of smoothed and base classifier
for p, score_func in enumerate(scores_list):
scores_simple[p, :] = score_func(simple_outputs, y_test[idx1], uniform_variables, all_combinations=False)
# Compute thresholds
level_adjusted = (1.0 - alpha) * (1.0 + 1.0 / float(n_calib))
for p in range(len(scores_list)):
thresholds[p, 0] = mquantiles(scores_simple[p, :], prob=level_adjusted)
probabilities = [simple_outputs[m, y_test[idx1][m]] for m in range(n_calib)]
for k in range(n_test_new):
u = rng.uniform(size=1, low=0.0, high=1.0)
noise = (torch.randn_like(x_test[idx2][k:(k+1)])*sigma_smooth).to(device)
noisy_points = x_test[idx2][k:(k+1)].to(device) + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1).numpy()
test_score = scores_list[0](noisy_outputs, y_test[idx2][k:(k+1)], u, all_combinations=False)
if test_score > thresholds[0, 0]:
continue
# Generate adversarial test examples for the base classifier
x_test_adv_base = Smooth_Adv(model, x_test[idx2][k:(k+1)], y_test[idx2][k:(k+1)], noise, 20, epsilon,
device, GPU_CAPACITY)
noisy_points = x_test_adv_base.to(device) + noise
# get classifier predictions on noisy points
model.eval() # put in evaluation mode
with torch.no_grad():
noisy_outputs = model(noisy_points).to(torch.device('cpu'))
# transform the output into probabilities vector
noisy_outputs = softmax(noisy_outputs, dim=1).numpy()
test_score_adv = scores_list[0](noisy_outputs, y_test[idx2][k:(k+1)], u, all_combinations=False)
if test_score_adv > thresholds[0, 0]:
break
else:
print("loading results:")
with open("./Create_Figures/Demonstration.pickle", 'rb') as f:
scores_simple, n_calib, thresholds, test_score, test_score_adv, test_point, test_label, x_test_adv_base = pickle.load(f)
# plot histogram with bounds
plt.figure(figsize=[6.4, 4.8])
to_plot = np.zeros_like(scores_simple[0, :])
for t in range(n_calib):
if (scores_simple[0, t] > 0.95) and (np.random.random() > 0.6):
to_plot[t] = np.random.random()
else:
to_plot[t] = scores_simple[0, t]
sns.histplot(to_plot, bins=25, alpha=0.4)
plt.tick_params(axis='both', which='major', labelsize=base_size)
plt.axvline(x=0.8, color='r', linewidth=linesize)
plt.xlabel("Calibration Scores", size=base_size, horizontalalignment='right', x=0.84)
plt.ylabel("Count", size=base_size)
plt.axvline(x=0.3, color='m', linewidth=linesize)
plt.axvline(x=0.9, color='m', linewidth=linesize)
plt.axvline(x=0.9+0.04, color='g', linewidth=linesize)
plt.tight_layout()
plt.savefig("./Create_Figures/Figures/Hist.jpg", dpi=300)
if not load:
save_image(x_test[idx2][k], 'img1.png')
save_image(x_test_adv_base, 'img2.png')
with open("./Create_Figures/Demonstration.pickle", 'wb') as f:
pickle.dump([scores_simple, n_calib, thresholds, test_score, test_score_adv, x_test[idx2][k], y_test[idx2][k], x_test_adv_base], f)
print(y_test[idx2][k]) |
''' Define the Transformer model '''
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
###################################
#### different attention types ####
###################################
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
# self.mix_weights= nn.Parameter(torch.rand(1))
# self.mix_weights=nn.Linear(512,1)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
# attn = attn.masked_fill(mask, 0)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
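# A small shape-check sketch (not part of the original model); the sizes below
# are arbitrary assumptions chosen to illustrate the (batch, len, dim) layout
# expected by the torch.bmm calls in ScaledDotProductAttention.forward above.
def _example_scaled_dot_product_attention():
    q = torch.randn(2, 5, 64)  # (batch, query_len, d_k)
    k = torch.randn(2, 5, 64)  # (batch, key_len, d_k)
    v = torch.randn(2, 5, 64)  # (batch, key_len, d_v)
    attention = ScaledDotProductAttention(temperature=np.power(64, 0.5))
    output, attn = attention(q, k, v)
    # output has shape (2, 5, 64); attn has shape (2, 5, 5)
    return output.shape, attn.shape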
class TreeAttention(nn.Module):
    ''' Tree Attention (uses externally supplied attention weights) '''
def __init__(self, attn_dropout=0.1):
super().__init__()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
# self.mix_weights= nn.Parameter(torch.rand(1))
# self.mix_weights=nn.Linear(512,1)
def forward(self, v, tree_attn, mask=None):
attn = tree_attn
if mask is not None:
attn = attn.masked_fill(mask, 0)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class RandAttention(nn.Module):
    ''' Random Attention (fixed or learned attention matrix) '''
def __init__(self, n_head,attn_dropout=0.1, learned_attn=False):
super().__init__()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=3)
self.rand_attn = nn.Parameter(torch.randn(n_head,512,512),requires_grad = learned_attn)
# self.mix_weights= nn.Parameter(torch.rand(1))
# self.mix_weights=nn.Linear(512,1)
def forward(self, v, mask=None):
## v is (batch,n_head,len,dim)
## mask is (batch, n_head,len,len)
attn = self.rand_attn[:,:v.shape[2],:v.shape[2]]
# print(attn[0])
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.matmul(attn, v) # (batch,n_head,len,dim)
return output, attn
class DenseAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, d_model,n_head,dv,attn_dropout=0.1):
super().__init__()
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=3)
# self.mix_weights= nn.Parameter(torch.rand(1))
# self.mix_weights=nn.Linear(512,1)
self.dense_layers = nn.ModuleList([nn.Sequential(nn.Linear(d_model,dv),nn.ReLU(),nn.Linear(dv,512)) for _ in range(n_head)])
def forward(self, x, v, mask=None):
## x in (batch,len,dim)
## v is (batch,n_head,len,dim)
## attn is (batch, n_head,len,len)
attn = [dense(x)[:,:,:x.shape[1]] for dense in self.dense_layers] # b*l*l
attn = torch.stack(attn,1) #b*n_head*l*l
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
# attn = attn.masked_fill(mask, 0)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.matmul(attn, v) # (batch,n_head,len,dim)
return output, attn
###################################
#### position feed forward ####
###################################
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
##########################################################
#### multi head attention with different attention types ####
##########################################################
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1, attention_type='dense'):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention_type=attention_type
if self.attention_type=='dense':
self.attention = DenseAttention(d_model,n_head,d_v)
elif self.attention_type =='tree':
# self.w_qs = nn.Linear(d_model, n_head * d_k)
# self.w_ks = nn.Linear(d_model, n_head * d_k)
# nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
# nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
self.attention = TreeAttention()
elif self.attention_type=='fixed_rand':
self.attention=RandAttention(n_head,learned_attn=False)
elif self.attention_type=='learned_rand':
self.attention=RandAttention(n_head,learned_attn=True)
elif self.attention_type=='none':
pass
elif self.attention_type=='self-attention':
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
else:
raise Exception('No such attention type.')
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None, tree_attn=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual = q
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
if self.attention_type=='dense':
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
v = v.permute(0, 2, 1, 3).contiguous() # b x n_head x lv x dv
mask = mask.repeat(n_head,1, 1, 1).permute(1, 0, 2, 3) # b x n_head x lv x lv
x=q
output, attn = self.attention(x,v,mask=mask) # (batch,n_head,len,dim)
output = output.permute(1,0,2,3)
elif self.attention_type=='tree':
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
tree_attn = tree_attn.repeat(n_head,1,1)
output, attn = self.attention(v,tree_attn,mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
elif self.attention_type=='fixed_rand':
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
v = v.permute(0, 2, 1, 3).contiguous() # b x n_head x lv x dv
mask = mask.repeat(n_head,1, 1, 1).permute(1, 0, 2, 3) # b x n_head x lv x lv
output, attn = self.attention(v,mask=mask) # (batch,n_head,len,dim)
output = output.permute(1,0,2,3)
elif self.attention_type=='learned_rand':
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
v = v.permute(0, 2, 1, 3).contiguous() # b x n_head x lv x dv
mask = mask.repeat(n_head,1, 1, 1).permute(1, 0, 2, 3) # b x n_head x lv x lv
output, attn = self.attention(v,mask=mask) # (batch,n_head,len,dim)
output = output.permute(1,0,2,3) # (n_head,batch,len,dim)
elif self.attention_type=='none':
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
output = v.permute(2,0,1,3) # (n_head,batch,len,dim)
attn = torch.zeros(output.shape[1],output.shape[2],output.shape[2])
elif self.attention_type=='self-attention':
# n_head = n_head_local+n_head_globa
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
output, attn = self.attention(q, k, v,mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output,attn
########################
#### Encoder Layer ####
#######################
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head,d_k, d_v, dropout=0.1,attention_type='self-attention'):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout,attention_type=attention_type)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None, tree_attn=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask,tree_attn=tree_attn)
enc_output *= non_pad_mask
# print(enc_output.size()) #b*lv*d_v
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask
return enc_output, enc_slf_attn
##################################
#### Complete Document Encoder####
##################################
class Encoder(nn.Module):
''' An encoder model with a self-attention mechanism. '''
def __init__(
self,n_layers, n_head,d_k, d_v,
d_model, d_inner, dropout=0.1, attention_type='self-attention'):
super().__init__()
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout,attention_type=attention_type)
for _ in range(n_layers)])
def forward(self, enc_output, edu_mask, return_attns=False,tree_attn=None):
enc_slf_attn_list = []
if (enc_output != enc_output).any():
print('nan at line 91 in EncoderForSumm.py')
non_pad_mask = edu_mask.unsqueeze(-1)
slf_attn_mask = (1-edu_mask).unsqueeze(1).expand(-1,edu_mask.size()[1],-1).type(torch.bool)
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(
enc_output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask,
tree_attn = tree_attn)
if (enc_output != enc_output).any():
print('nan at line 101 in EncoderForSumm.py')
if return_attns:
enc_slf_attn_list += [enc_slf_attn]
if return_attns:
return enc_output, enc_slf_attn_list
return enc_output
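# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal forward
# pass through the self-attention Encoder defined above. The layer sizes, the
# toy input shape and the all-ones edu_mask are illustrative assumptions, not
# values taken from the original training setup.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    enc = Encoder(n_layers=2, n_head=4, d_k=16, d_v=16,
                  d_model=64, d_inner=128, attention_type='self-attention')
    x = torch.randn(2, 10, 64)        # (batch, seq_len, d_model)
    edu_mask = torch.ones(2, 10)      # 1 marks a real (non-padded) position
    with torch.no_grad():
        out, attns = enc(x, edu_mask, return_attns=True)
    print(out.shape, len(attns))      # expected: torch.Size([2, 10, 64]) 2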
|
#!/usr/bin/env python3
import argparse
import subprocess
def main():
parser = argparse.ArgumentParser()
parser.add_argument('ldd')
parser.add_argument('bin')
args = parser.parse_args()
result = subprocess.run([args.ldd, args.bin], stdout=subprocess.PIPE)
assert result.returncode == 0
o = result.stdout.decode()
assert 'libstuff.so =>' in o, 'libstuff.so not in linker path.'
assert 'libstuff.so => not found' not in o, 'libstuff.so was not resolved by the dynamic linker'
if __name__ == '__main__':
main()
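# Hedged usage example (the script and binary paths below are placeholders,
# not taken from the original project):
#   python3 check_libstuff.py /usr/bin/ldd ./build/mybinary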
|
# Generated by Django 3.2.3 on 2021-07-03 09:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wanted', '0016_review'),
]
operations = [
migrations.RemoveField(
model_name='quest',
name='tags',
),
migrations.AlterField(
model_name='quest',
name='code',
field=models.TextField(blank=True, default=''),
),
]
|
import os
from pathlib import Path
def is_root() -> bool:
"""
Checks whether the current user is root (or, on Windows, an administrator).
"""
if os.name == 'nt':
try:
_dummy = list((Path(os.environ.get('SystemRoot', 'C:\\Windows')) / 'Temp').iterdir())
return True
except OSError:
return False
else:
return os.geteuid() == 0
|
#!/usr/bin/env python3
import sys
import pickle as pk
import numpy as np
def print_pickle(pickle_fn):
with open(pickle_fn,'rb') as pkl:
stat=pk.load(pkl)
stat.keys()
print(stat['creation_stats'])
print(f"Output has {len(stat['Random_pokes'])} measurements.")
for pkl in stat['Random_pokes']:
print(f"input size = {pkl['m']:.1e}.")
print(f"mean latency (sec) = {pkl['memory__mean']:.3e}.")
print(f"std latency (sec) = {pkl['memory__std']:.3e}.")
print(f"mean of largest 100 latencies (sec) = {np.mean(pkl['memory__largest']):.3e}.")
print('')
if __name__ == "__main__":
print_pickle(sys.argv[1]) |
#coding:utf-8
#
# id: bugs.core_6279
# title: Put options in user management statements in any order
# decription:
# According to the new syntax described in doc\\sql.extensions\\README.user_management, any statement that
# creates or modifies a user must now look like this:
# CREATE OR ALTER USER name [ SET ] [ options ];
# where OPTIONS is a list of following options:
# - PASSWORD 'password'
# - FIRSTNAME 'firstname'
# - MIDDLENAME 'middlename'
# - LASTNAME 'lastname'
# - ACTIVE
# - INACTIVE
# - USING PLUGIN name
# - TAGS ( tag [, tag [, tag ...]] )
#
# We add all options from this list, except 'INACTIVE', as separate records to the table 'TSYNTAX', field: 'token'.
# Then we generate all possible combinations of these options, with the requirement that each of them occurs in a generated
# record only once (see: f_generate_sql_with_combos).
# The query will contain 7 columns, one per option, and we further concatenate them into a single string.
# As a result, this 'suffix part' will contain all tokens in all possible orders (see the cross-check sketch below this header).
# We will add this 'suffix part' to 'create or alter user ...' statement.
#
# Finally, we redirect result of this query to a new .sql script (see: f_ddl_combinations_script) and run it.
# NOTE: total number of 'CREATE OR ALTER USER' statements in it will be 10080.
#
# Result of this .sql must be EMPTY: all statements have to be executed without error.
#
# It is crucial for this test to run the .sql script within a SINGLE transaction, otherwise performance will suffer.
# Also, we must inject 'SET BAIL ON;' at the start of this script in order to make it stop when the first error occurs.
#
# Checked on 4.0.0.1876 SS/CS: OK, 6.659/7.722s
#
# tracker_id: CORE-6279
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
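# A hedged cross-check (illustration only, not part of the original test): the
# expected statement count can be reproduced in pure Python, assuming the seven
# options listed above plus the extra ACTIVE -> INACTIVE variant of each line:
#
#     from itertools import permutations
#     options = ['password', 'firstname', 'middlename', 'lastname',
#                'active', 'using plugin', 'tags']
#     print(2 * len(list(permutations(options))))   # 2 * 7! = 10080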
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import time
# import subprocess
# from subprocess import Popen
# from fdb import services
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for f in f_names_list:
# if type(f) == file:
# del_name = f.name
# elif type(f) == str:
# del_name = f
# else:
# print('Unrecognized type of element:', f, ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# sql_init='''
# recreate table tsyntax( token varchar(100) );
# commit;
# insert into tsyntax( token ) values( 'password ''bar'' ' );
# insert into tsyntax( token ) values( 'firstname ''john'' ' );
# insert into tsyntax( token ) values( 'middlename ''ozzy'' ' );
# insert into tsyntax( token ) values( 'lastname ''osbourne'' ' );
# insert into tsyntax( token ) values( 'active' );
# insert into tsyntax( token ) values( 'inactive' );
# insert into tsyntax( token ) values( 'using plugin Srp' );
# insert into tsyntax( token ) values( 'tags ( foo = ''bar'', rio = ''gee'' )' );
# commit;
#
# set heading off;
# select 'set bail on;' from rdb$database union all
# select 'set echo off;' from rdb$database union all
# select 'commit;' from rdb$database union all
# select 'set autoddl off;' from rdb$database union all
# select 'commit;' from rdb$database
# ;
#
# with
# t as (
# select *
# from tsyntax x
# where x.token not in ('inactive')
# )
# ,b as (
# select trim(a.token) as a, trim(b.token) as b, trim(c.token) as c, trim(d.token) as d, trim(e.token) as e, trim(f.token) as f, trim(g.token) as g
# from t a
# left join t b on b.token not in (a.token)
# left join t c on c.token not in (a.token, b.token)
# left join t d on d.token not in (a.token, b.token, c.token)
# left join t e on e.token not in (a.token, b.token, c.token, d.token)
# left join t f on f.token not in (a.token, b.token, c.token, d.token, e.token)
# left join t g on g.token not in (a.token, b.token, c.token, d.token, e.token, f.token)
# )
# ,c as (
# select a || ' ' || b || ' ' || c || ' ' || d || ' ' || e || ' ' || f || ' ' || g || ';' as ddl_expr
# from b
# )
# select 'create or alter user tmp$c6279 ' || ddl_expr from c
# union all
# select 'create or alter user tmp$c6279 ' || replace(ddl_expr, ' active', ' inactive') from c;
#
# select 'rollback;' from rdb$database
# ;
#
# '''
#
#
# f_generate_sql_with_combos=open( os.path.join(context['temp_directory'],'tmp_c6279_pre.sql'), 'w')
# f_generate_sql_with_combos.write(sql_init)
# flush_and_close( f_generate_sql_with_combos )
#
# f_ddl_combinations_script=open( os.path.join(context['temp_directory'],'tmp_c6279_run.sql'), 'w', buffering = 0)
# f_create_combinations_err=open( os.path.join(context['temp_directory'],'tmp_c6279_pre.err'), 'w', buffering = 0)
#
# # PREPARING. GENERATE .SQL STATEMENTS WITH ALL POSSIBLE COMBINATIONS OF OPTIONS:
# ############
# subprocess.call( [context['isql_path'], dsn, '-q', '-i', f_generate_sql_with_combos.name], stdout=f_ddl_combinations_script, stderr=f_create_combinations_err )
# flush_and_close( f_ddl_combinations_script )
# flush_and_close( f_create_combinations_err )
#
# #------------------------------------------------------------------------------------------------
#
# f_run_ddl_combinations_log=open( os.path.join(context['temp_directory'],'tmp_c6279_run.log'), 'w', buffering = 0)
# f_run_ddl_combinations_err=open( os.path.join(context['temp_directory'],'tmp_c6279_run.err'), 'w', buffering = 0)
#
# # MAIN QUERY. CHECK ALL POSSIBLE COMBINATIONS OF OPTIONS:
# #############
# subprocess.call( [context['isql_path'], dsn, '-q', '-i', f_ddl_combinations_script.name], stdout=f_run_ddl_combinations_log, stderr=f_run_ddl_combinations_err )
# flush_and_close( f_run_ddl_combinations_log )
# flush_and_close( f_run_ddl_combinations_err )
#
# # Checks:
# #########
# # Both for prepare (creating main .sql) and for main sql script STDOUT and STDERR must be empty:
# for r in (f_run_ddl_combinations_log, f_create_combinations_err, f_run_ddl_combinations_err):
# with open(r.name, 'r') as f:
# for line in f:
# if line.split():
# print('UNEXPECTED OUTPUT IN ISQL RESULT: ' + line.strip() +'; file: ' + r.name )
#
# # Cleanup:
# ##########
# time.sleep(1)
#
# cleanup( ( f_generate_sql_with_combos,f_ddl_combinations_script,f_create_combinations_err,f_run_ddl_combinations_log,f_run_ddl_combinations_err ) )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
|
import sys
import typing
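# Added explanation (assuming the usual precedence where AND binds tighter
# than OR): the expression is an OR of AND-blocks, and it is False only when
# every block is False. A block of c boolean variables is False in 2**c - 1
# ways, so the number of satisfying assignments is
# 2**(n + 1) - prod(2**c_i - 1) taken over the block sizes c_i computed below.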
def main() -> typing.NoReturn:
n = int(input())
s = [x == 'AND' for x in sys.stdin.read().split()]
cnts = []
cnt = 1
for x in s:
if x:
cnt += 1
continue
cnts.append(cnt)
cnt = 1
cnts.append(cnt)
p = 1
for cnt in cnts:
p *= (1 << cnt) - 1
print((1 << (n + 1)) - p)
main()
|
from django import forms
from PIL import Image
from django.utils.translation import ugettext_lazy as _
from . import models
class CategoryFrom(forms.ModelForm):
'''
Form for item's category
'''
class Meta:
model = models.Category
fields = ['name', ]
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set placeholder text for the name field
self.fields['name'].widget.attrs['placeholder'] = _('Category name')
class ItemForm(forms.ModelForm):
'''
Form for items
'''
#############################################################
# Fields for image cropping
x = forms.FloatField(widget=forms.HiddenInput(), required=False)
y = forms.FloatField(widget=forms.HiddenInput(), required=False)
width = forms.FloatField(widget=forms.HiddenInput(), required=False)
height = forms.FloatField(widget=forms.HiddenInput(), required=False)
#############################################################
class Meta:
model = models.Item
fields = [
'code', 'desciption', 'barcode', 'stock_limit', 'price',
'location', 'category', 'is_active', 'photo'
]
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set placeholder and help texts for the item fields
self.fields['code'].widget.attrs['placeholder'] = _('Item code')
self.fields['desciption'].widget.attrs['placeholder'] = _('Item description')
self.fields['barcode'].widget.attrs['placeholder'] = _('Item barcode')
self.fields['price'].help_text = _("Enter item's selling unit price")
self.fields['price'].widget.attrs['placeholder'] = _('Item price')
self.fields['stock_limit'].widget.attrs['placeholder'] = _('Limit')
self.fields['stock_limit'].help_text = _(
"Enter the warning stock limit to re-order item")
self.fields['photo'].help_text = _(
"Item photo must be at least 500x500")
def save(self, commit=True):
'''
save for with cropped image
'''
form = super().save(commit=True)
# check if image sent
if form.photo:
try:
# get cropping data
x = self.cleaned_data.get('x')
y = self.cleaned_data.get('y')
w = self.cleaned_data.get('width')
h = self.cleaned_data.get('height')
# get the save image path
image = Image.open(form.photo.path)
cropped_image = image.crop((x, y, w + x, h + y))
# reject crops whose pixel area is below the minimum threshold
if cropped_image.size[0] * cropped_image.size[1] < \
0.5 * 1024 * 1024:
raise forms.ValidationError(
_('Image file too small ( < 500kb )'))
else:
# resize the image after cropping
resized_image = cropped_image.resize(
(500, 500),
Image.ANTIALIAS)
resized_image.save(form.photo.path)
return form
except Exception as error_type:
print(error_type)
form.save()
return form
else:
form.save()
return form
class AssemblyItemForm(forms.ModelForm):
'''
Form for assembled items
'''
class Meta:
model = models.AssemblyItem
fields = ['item', 'sub_item', 'quantity', ]
class LocationForm(forms.ModelForm):
'''
Form for locations
'''
class Meta:
model = models.Location
fields = ['name', ]
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set placeholder text for the name field
self.fields['name'].widget.attrs['placeholder'] = _('Location name')
class SubLocationForm(forms.ModelForm):
'''
Form for sublocations
'''
class Meta:
model = models.SubLocation
fields = ['location', 'name', ]
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set placeholder and help texts for the fields
self.fields['location'].help_text = _("Select location")
self.fields['name'].widget.attrs['placeholder'] = _('Location name')
self.fields['name'].help_text = _("Sub-location name")
class ItemMoveForm(forms.ModelForm):
'''
Form for items' movements
'''
# error messages for email and password matches
error_messages = {
'quantity_error': _(
"Quantity can't be negative for the selected location"),
'item_error': _("Must choose valid item"),
'type_error': _("Must choose valid type"),
'location_error': _("Must choose valid location"),
}
class Meta:
model = models.ItemMove
fields = ['item', 'type', 'location', 'quantity', 'note', ]
labels = {"note": _('Notes*')}
def clean_quantity(self):
'''
Method to clean quantity
'''
cleaned_data = self.cleaned_data
item = cleaned_data.get('item')
location = cleaned_data.get('location')
type = cleaned_data.get('type')
if not item:
raise forms.ValidationError(
self.error_messages['item_error'],
code='item_error')
if not location:
raise forms.ValidationError(
self.error_messages['location_error'],
code='location_error')
if not type:
raise forms.ValidationError(
self.error_messages['type_error'],
code='type_error')
quantity = int(self.cleaned_data.get('quantity'))
if item.get_quantity(location.id) < quantity and \
type == models.ItemMove.REMOVE:
raise forms.ValidationError(
self.error_messages['quantity_error'],
code='quantity_error')
else:
return quantity
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set widget attributes and help texts for the fields
self.fields['note'].widget.attrs['required'] = 'required'
self.fields['note'].widget.attrs['placeholder'] = _('Add notes for this custom move')
self.fields['quantity'].widget.attrs['min'] = 0
self.fields['quantity'].widget.attrs['placeholder'] = _('Add desired quantity')
self.fields['location'].help_text = _("Select location")
self.fields['type'].help_text = _("Select movement type")
class ItemTransferForm(forms.ModelForm):
'''
Form for item transfer from location to another
'''
quantity = forms.IntegerField()
class Meta:
model = models.ItemTransfer
fields = ['item', 'old_location', 'new_location', 'quantity', ]
def __init__(self, *args, **kwargs):
'''
Method for initial values and functions
'''
# get the initial form class values
super().__init__(*args, **kwargs)
# set widget attributes and help texts for the fields
self.fields['quantity'].widget.attrs['min'] = 0
self.fields['quantity'].widget.attrs['placeholder'] = _('Add desired quantity')
self.fields['old_location'].help_text = _("Select old location")
self.fields['new_location'].help_text = _("Select new location")
|
lst1=[['*','*','*','*','1'],['*','*','*','1','2'],['*','*','1','2','3'],['*','1','2','3','4'],['1','2','3','4','5']]
for i in lst1:
print(*i)
|
#!/usr/bin/env python
# coding: utf-8
import os
from PIL import Image
from numpy import *
from pylab import *
def process_image(imagename,resultname,params="--edge-thresh 10 --peak-thresh 5"):
""" Process an image and save the results in a file. """
if imagename[-3:] != 'pgm':
# create a pgm file
im = Image.open(imagename).convert('L')
im.save('tmp.pgm')
imagename = 'tmp.pgm'
# invoke the external SIFT binary on the command line
cmmd = str("sift "+imagename+" --output="+resultname+" "+params)
os.system(cmmd)
# print('processed', imagename, 'to', resultname)
return resultname
def read_features_from_file(filename):
""" Read feature properties and return in matrix form. """
f = loadtxt(filename)
return f[:,:4],f[:,4:] # feature locations, descriptors
def write_features_to_file(filename,locs,desc):
""" Save feature location and descriptor to file. """
savetxt(filename,hstack((locs,desc)))
def plot_features(im,locs,circle=False):
""" Show image with features. input: im (image as array),
locs (row, col, scale, orientation of each feature). """
def draw_circle(c,r):
t = arange(0,1.01,.01)*2*pi
x = r*cos(t) + c[0]
y = r*sin(t) + c[1]
plot(x,y,'b',linewidth=2)
imshow(im)
if circle:
for p in locs:
draw_circle(p[:2],p[2])
else:
plot(locs[:,0],locs[:,1],'ob')
axis('off')
def match(desc1,desc2):
""" For each descriptor in the first image,
select its match in the second image.
input: desc1 (descriptors for the first image),
desc2 (same for second image). """
desc1 = array([d/linalg.norm(d) for d in desc1])
desc2 = array([d/linalg.norm(d) for d in desc2])
dist_ratio = 0.6
desc1_size = desc1.shape
matchscores = zeros((desc1_size[0]),'int')
desc2t = desc2.T # precompute matrix transpose
for i in range(desc1_size[0]):
dotprods = dot(desc1[i,:],desc2t) # vector of dot products
dotprods = 0.9999*dotprods
# inverse cosine and sort, return index for features in second image
indx = argsort(arccos(dotprods))
# check if nearest neighbor has angle less than dist_ratio times 2nd
if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:
matchscores[i] = int(indx[0])
return matchscores
def appendimages(im1,im2):
""" Return a new image that appends the two images side-by-side. """
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = concatenate((im1,zeros((rows2-rows1,im1.shape[1]))), axis=0)
elif rows1 > rows2:
im2 = concatenate((im2,zeros((rows1-rows2,im2.shape[1]))), axis=0)
# otherwise the row counts are equal and no filling is needed.
return concatenate((im1,im2), axis=1)
def plot_matches(im1,im2,locs1,locs2,matchscores,show_below=True):
""" Show a figure with lines joining the accepted matches
input: im1,im2 (images as arrays), locs1,locs2 (location of features),
matchscores (as output from 'match'), show_below (if images should be shown below). """
im3 = appendimages(im1,im2)
if show_below:
im3 = vstack((im3,im3))
# show image
imshow(im3)
# draw lines for matches
cols1 = im1.shape[1]
for i,m in enumerate(matchscores):
if m>0:
plot([locs1[i][0],locs2[m][0]+cols1],[locs1[i][1],locs2[m][1]],'c')
axis('off')
def match_twosided(desc1,desc2):
""" Two-sided symmetric version of match(). """
matches_12 = match(desc1,desc2)
matches_21 = match(desc2,desc1)
ndx_12 = matches_12.nonzero()[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[int(matches_12[n])] != n:
matches_12[n] = 0
return matches_12
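# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): a minimal end-to-end run of the
# functions above. It assumes the external 'sift' binary is available on the
# PATH and that 'im1.jpg' and 'im2.jpg' exist; both filenames are placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    im1 = array(Image.open('im1.jpg').convert('L'))
    im2 = array(Image.open('im2.jpg').convert('L'))
    process_image('im1.jpg', 'im1.sift')
    process_image('im2.jpg', 'im2.sift')
    l1, d1 = read_features_from_file('im1.sift')
    l2, d2 = read_features_from_file('im2.sift')
    matches = match_twosided(d1, d2)
    figure()
    plot_matches(im1, im2, l1, l2, matches)
    show()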
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gdkpixbuf2.py 1053 2007-07-18 13:32:00Z Alex.Holkner $'
from ctypes import *
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
import pyglet.lib
import pyglet.window
gdk = pyglet.lib.load_library('gdk-x11-2.0')
gdkpixbuf = pyglet.lib.load_library('gdk_pixbuf-2.0')
GdkPixbufLoader = c_void_p
GdkPixbuf = c_void_p
gdkpixbuf.gdk_pixbuf_loader_new.restype = GdkPixbufLoader
gdkpixbuf.gdk_pixbuf_loader_get_pixbuf.restype = GdkPixbuf
gdkpixbuf.gdk_pixbuf_get_pixels.restype = c_void_p
class GdkPixbuf2ImageDecoder(ImageDecoder):
def get_file_extensions(self):
return ['.png', '.xpm', '.jpg', '.jpeg', '.tif', '.tiff', '.pnm',
'.ras', '.bmp', '.gif']
def decode(self, file, filename):
data = file.read()
# Load into pixbuf
err = c_int()
loader = gdkpixbuf.gdk_pixbuf_loader_new()
gdkpixbuf.gdk_pixbuf_loader_write(loader, data, len(data), byref(err))
pixbuf = gdkpixbuf.gdk_pixbuf_loader_get_pixbuf(loader)
if not gdkpixbuf.gdk_pixbuf_loader_close(loader, byref(err)):
raise ImageDecodeException(filename)
if not pixbuf:
raise ImageDecodeException('Unable to load pixbuf: %s' % filename)
# Get format and dimensions
width = gdkpixbuf.gdk_pixbuf_get_width(pixbuf)
height = gdkpixbuf.gdk_pixbuf_get_height(pixbuf)
channels = gdkpixbuf.gdk_pixbuf_get_n_channels(pixbuf)
rowstride = gdkpixbuf.gdk_pixbuf_get_rowstride(pixbuf)
has_alpha = gdkpixbuf.gdk_pixbuf_get_has_alpha(pixbuf)
pixels = gdkpixbuf.gdk_pixbuf_get_pixels(pixbuf)
# Copy pixel data.
buffer = (c_ubyte * (rowstride * height))()
memmove(buffer, pixels, rowstride * (height - 1) + width * channels)
# Release pixbuf
gdk.g_object_unref(pixbuf)
# Determine appropriate GL type
if channels == 3:
format = 'RGB'
else:
format = 'RGBA'
return ImageData(width, height, format, buffer, -rowstride)
def get_decoders():
return [GdkPixbuf2ImageDecoder()]
def get_encoders():
return []
def init():
gdk.g_type_init()
init()
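# Hedged usage sketch (comments only; it assumes the GTK/GDK libraries loaded
# above are present and 'example.png' is a placeholder filename):
#     decoder = GdkPixbuf2ImageDecoder()
#     with open('example.png', 'rb') as f:
#         image = decoder.decode(f, 'example.png')
#     print(image.width, image.height, image.format)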
|
import sys
import math
import pytest
from sdl2 import endian
class TestSDLEndian(object):
__tags__ = ["sdl"]
def test_SDL_BYTEORDER(self):
if sys.byteorder == "little":
assert endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN
else:
assert endian.SDL_BYTEORDER == endian.SDL_BIG_ENDIAN
def test_SDL_Swap16(self):
assert endian.SDL_Swap16(0xFF00) == 0x00FF
assert endian.SDL_Swap16(0x0001) == 0x0100
assert endian.SDL_Swap16(0x0032) == 0x3200
assert endian.SDL_Swap16(0x0FF0) == 0xF00F
assert endian.SDL_Swap16(0x00FF) == 0xFF00
assert endian.SDL_Swap16(0x1234) == 0x3412
if sys.byteorder == "little":
assert endian.SDL_Swap16 == endian.SDL_SwapBE16
assert endian.SDL_Swap16 != endian.SDL_SwapLE16
else:
assert endian.SDL_Swap16 != endian.SDL_SwapBE16
assert endian.SDL_Swap16 == endian.SDL_SwapLE16
def test_SDL_Swap32(self):
assert endian.SDL_Swap32(0xFF000000) == 0x000000FF
assert endian.SDL_Swap32(0x00FF0000) == 0x0000FF00
assert endian.SDL_Swap32(0x0000FF00) == 0x00FF0000
assert endian.SDL_Swap32(0x000000FF) == 0xFF000000
assert endian.SDL_Swap32(0x12345678) == 0x78563412
assert endian.SDL_Swap32(0xFF00FF00) == 0x00FF00FF
if sys.byteorder == "little":
assert endian.SDL_Swap32 == endian.SDL_SwapBE32
assert endian.SDL_Swap32 != endian.SDL_SwapLE32
else:
assert endian.SDL_Swap32 != endian.SDL_SwapBE32
assert endian.SDL_Swap32 == endian.SDL_SwapLE32
def test_SDL_Swap64(self):
assert endian.SDL_Swap64(0xFF00000000000000) == 0x00000000000000FF
assert endian.SDL_Swap64(0x00FF000000000000) == 0x000000000000FF00
assert endian.SDL_Swap64(0x0000FF0000000000) == 0x0000000000FF0000
assert endian.SDL_Swap64(0x000000FF00000000) == 0x00000000FF000000
assert endian.SDL_Swap64(0x00000000FF000000) == 0x000000FF00000000
assert endian.SDL_Swap64(0x0000000000FF0000) == 0x0000FF0000000000
assert endian.SDL_Swap64(0x000000000000FF00) == 0x00FF000000000000
assert endian.SDL_Swap64(0x00000000000000FF) == 0xFF00000000000000
assert endian.SDL_Swap64(0x0123456789ABCDEF) == 0xEFCDAB8967452301
if sys.byteorder == "little":
assert endian.SDL_Swap64 == endian.SDL_SwapBE64
assert endian.SDL_Swap64 != endian.SDL_SwapLE64
else:
assert endian.SDL_Swap64 != endian.SDL_SwapBE64
assert endian.SDL_Swap64 == endian.SDL_SwapLE64
def test_SDL_SwapFloat(self):
v = -100.0
while v < 101:
p = endian.SDL_SwapFloat(v)
assert p != v
assert endian.SDL_SwapFloat(p) == v
v += 0.1
values = (sys.float_info.epsilon,
sys.float_info.min,
sys.float_info.max,
- sys.float_info.min,
math.pi,
- math.pi
)
for v in values:
p = endian.SDL_SwapFloat(v)
assert p != v
assert endian.SDL_SwapFloat(p) == v
if sys.byteorder == "little":
assert endian.SDL_SwapFloat == endian.SDL_SwapFloatBE
assert endian.SDL_SwapFloat != endian.SDL_SwapFloatLE
else:
assert endian.SDL_SwapFloat != endian.SDL_SwapFloatBE
assert endian.SDL_SwapFloat == endian.SDL_SwapFloatLE
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.conf.urls import patterns
urlpatterns = patterns(
'home_application.views',
##### page ####
(r'^$', 'home'),
(r'^dev-guide/$', 'dev_guide'),
(r'^contactus/$', 'contactus'),
(r'^history/$', 'result_page'),
(r'^cpu_chart/$', 'cpu_statistics_page'),
(r'^admin-page/$', 'admin_page'),
##### API method ######
(r'^get_task_by_app/(?P<app_id>\w+)/$', 'get_task_by_app'),
(r'^get_host_by_app/(?P<app_id>\w+)/$', 'get_host_by_app'),
(r'^get_app_by_user/$', 'get_app_by_user'),
(r'^execute_task/$', 'execute_task'),
(r'^filter_history/$', 'filter_history'),
(r'^get_task_log/(?P<job_id>\w+)/$', 'get_task_log'),
(r'^get_cpu_statistics/$', 'get_cpu_statistics'),
#### method test #####
(r'^test/$', 'test'),
)
|
#!/usr/bin/python3
# coding=utf8
import sys
import import_path
import kinematics as kinematics
import HiwonderSDK.Board as Board
HWSONAR = None
ik = kinematics.IK()
if sys.version_info.major == 2:
print('Please run this program with python3!')
sys.exit(0)
# Initial position
def initMove():
HWSONAR.setRGBMode(0)
HWSONAR.setRGB(1, (0, 0, 0))
HWSONAR.setRGB(2, (0, 0, 0))
Board.setPWMServoPulse(1, 1500, 500)
Board.setPWMServoPulse(2, 1500, 500)
def reset():
return None
def init():
initMove()
print("Transport Init")
return None
def start():
print("Transport Start")
return None
def stop():
print("Transport Stop")
return None
def exit():
print("Transport Exit")
ik.stand(ik.initial_pos, t=1000)
return None
def run(img):
return img
|
__author__ = 'gpratt'
'''
Created on Jun 21, 2013
@author: gabrielp
'''
import unittest
import tests
from gscripts.clipseq.demux_paired_end import reformat_read, read_has_barcode
class Test(unittest.TestCase):
def test_read_has_barcode(self):
"""
Test hamming distance with hamming of 0
:return:
"""
barcodes = ['GTTG', 'AAAA']
seq_1 = "GTTGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
barcode = read_has_barcode(barcodes, seq_1)
self.assertEqual(barcode, barcodes[0])
def test_read_has_barcode_hamming_1(self):
"""
Test hamming distance with hamming of 0 and 1
:return:
"""
barcodes = ['GTTG', 'AAAA']
seq_1 = "ATTGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
barcode = read_has_barcode(barcodes, seq_1, max_hamming_distance=0)
self.assertEqual(barcode, None)
seq_1 = "ATTGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
barcode = read_has_barcode(barcodes, seq_1, max_hamming_distance=1)
self.assertEqual(barcode, barcodes[0])
def test_variable_length_barcodes(self):
"""
Test barcode matching when the barcodes have different lengths
:return:
"""
barcodes = ['GTTG', 'AAAAA']
seq_1 = "ATTGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
barcode = read_has_barcode(barcodes, seq_1, max_hamming_distance=1)
self.assertEqual(barcode, barcodes[0])
seq_1 = "AAAAATGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
barcode = read_has_barcode(barcodes, seq_1, max_hamming_distance=1)
self.assertEqual(barcode, barcodes[1])
def test_reformat_read(self):
"""
Tests basic reformatting of read to output known correct result
"""
barcodes = {"GTTG": "R01"}
name_1 = "@M01356\n"
seq_1 = "GTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
plus_1 = "+\n"
quality_1 = "BBBBCBCFFFFFGGGGGGGGGGHHHHHHHHHHHHHHHHHHHHHHH\n"
name_2 = "@M01356\n"
seq_2 = "CCGGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n"
plus_2 = "+\n"
quality_2 = "BBBBCBCFFFFFGGGGGGGGGGHHHHHHHHHHHHHHHHHHHHHHH\n"
barcode, randomer, read_1, read_2 = reformat_read(name_1, seq_1, plus_1, quality_1,
name_2, seq_2, plus_2, quality_2,
barcodes)
self.assertEqual("GTTG", barcode)
self.assertEqual("CC", randomer)
self.assertEqual(read_1,
"@CC:M01356\nTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n+\nCBCFFFFFGGGGGGGGGGHHHHHHHHHHHHHHHHHHHHHHH\n")
self.assertEqual(read_2,
"@CC:M01356\nGGTTGTATTTCATTCTGCCCAGAGCAAAATACATGTGACAAAA\n+\nBBCBCFFFFFGGGGGGGGGGHHHHHHHHHHHHHHHHHHHHHHH\n")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
from ptrlib import *
# 3XPL01717
# _H4CK3R_
sock = Socket("lazy.chal.seccon.jp", 33333)
# login
for i in range(3):
sock.recvline()
sock.sendline("2")
#sock.sendlineafter(": ", "A" * 31)
sock.sendlineafter(": ", "_H4CK3R_")
sock.sendlineafter(": ", "3XPL01717")
# manage
sock.sendline("4")
sock.sendlineafter(": ", "lazy")
sock.recvuntil("bytes")
lazy = sock.recvonce(14216)
with open("lazy", "wb") as f:
f.write(lazy)
|
import argparse
import pandas as pd
from traffic_counter import TrafficCounter
# source : https://github.com/andresberejnoi/ComputerVision
def CLI():
# Define default values here to make documentation self-updating
minArea_default = 50 # todo 200 A4 300
direction_default = ['H', '0.5']
numCount_default = 5 # todo 10
videoWidth_default = 640
videoParams_default = ['mjpg', 'avi']
startingFrame_default = 10
parser = argparse.ArgumentParser(description='Finds the contours on a video file') # creates a parser object
parser.add_argument('-p', '--path', type=str, help="""A video filename or path.
Works better with .avi files.
If no path or name is provided, the camera will be used instead.""") # instead of using metavar='--path', just type '--path'. For some reason the metavar argument was causing problems
parser.add_argument('-a', '--minArea', type=int,
help=f'The minimum area (in pixels) to draw a bounding box (default is {minArea_default})',
default=minArea_default)
parser.add_argument('-d', '--direction', type=str, default=direction_default, nargs=2, help=f"""A character: H or V
representing the orientation of the count line. H is horizontal, V is vertical.
If not provided, the default is {direction_default[0]},{direction_default[1]}. The second parameter
is a float number from 0 to 1 indicating the place at which the
line should be drawn.""")
parser.add_argument('-n', '--numCount', type=int, default=numCount_default,
help=f"""The number of contours to be detected by the program (default is {numCount_default}).""")
parser.add_argument('-w', '--webcam', type=int, nargs='+',
help="""Allows the user to specify which to use as the video source""")
parser.add_argument('--rgb', action='store_true', help="Boolean flag to use rbg colors.")
parser.add_argument('-vo', '--video_out', type=str, default="", help="Provide a video filename to output")
parser.add_argument('-vw', '--video_width', type=int, default=videoWidth_default,
help=f"Videos will be resized to this width (default is {videoWidth_default}). Height will be computed automatically to preserve aspect ratio")
parser.add_argument('-vp', '--video_params', type=str, default=videoParams_default, nargs=2,
help=f"Provide video codec and extension (in that order) for the output video. Example: `--video_params mjpg avi`. Default values are {videoParams_default[0]} {videoParams_default[1]}")
parser.add_argument('-sf', '--starting_frame', type=int, default=startingFrame_default,
help=f"Select the starting frame for video analysis (default is {startingFrame_default}). All frames before that will still be used for the background average")
args = parser.parse_args()
return args
def make_video_params_dict(video_params):
codec = video_params[0]
extension = video_params[1]
params_dict = {
'codec': codec,
'extension': extension,
}
return params_dict
def main(args):
video_source = args.path
line_direction = args.direction[0]
line_position = float(args.direction[1])
video_width = args.video_width
min_area = int(args.minArea)
video_out = args.video_out
numCnts = int(args.numCount)
video_params = make_video_params_dict(args.video_params)
starting_frame = args.starting_frame
tc = TrafficCounter(video_source, line_direction, line_position, video_width, min_area, video_out, numCnts,
video_params, starting_frame)
tc.main_loop()
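# Hedged example invocation (the script and video file names are placeholders,
# not taken from the original repository):
#   python traffic_counter_cli.py -p traffic.avi -d V 0.5 -a 200 -n 10 -vo out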
if __name__ == '__main__':
args = CLI()
main(args) |