code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M |
---|---|---|---|---|---|
# -*- coding: utf8 -*-
grl = False
m_or_d = 0
import numpy as np
from progressbar import ProgressBar
from ROOT import gRandom
Nevts = 100000
scores = []
for i in xrange(2):
gRandom.SetSeed(i)
backgrounds = []
signals = []
for j in xrange(Nevts):
s_or_b = gRandom.Binomial(1,0.5)
train_or_test = gRandom.Binomial(1,0.5)
if s_or_b == 0:
# x_1 is perfectly simulated, with poor resolution
x_1 = gRandom.Gaus(-0.5,2.2)
if m_or_d == 0:
x_2 = gRandom.Gaus(-1.0,0.2)
x_3 = gRandom.Gaus(x_2,1.0)
else:
x_2 = gRandom.Gaus(+1.0,0.2)
x_3 = gRandom.Gaus(x_2,1.3)
backgrounds.append([x_1,x_2,x_3])
else:
x_1 = gRandom.Gaus(+0.5,2.2)
if m_or_d == 0:
x_2 = gRandom.Gaus(-1.0,0.2)
x_3 = gRandom.Gaus(x_2+0.5,1.3)
else:
x_2 = gRandom.Gaus(+1.0,0.2)
x_3 = gRandom.Gaus(x_2+0.5,1.3)
signals.append([x_1,x_2,x_3])
import sys
sys.path.insert(0,'/home/pseyfert/coding/caffe/python')
import caffe
caffe.set_mode_cpu()
if not grl:
solver = caffe.get_solver("solver.prototxt")
else:
solver = caffe.get_solver("grlsolver.prototxt")
niter=9000
losses = np.zeros(niter+1)
dlosses = np.zeros(niter+1)
if grl:
solver.test_nets[0].copy_from('grlsnapshot_iter_'+str(niter)+'.caffemodel')
else:
solver.test_nets[0].copy_from('standardsnapshot_iter_'+str(niter)+'.caffemodel')
signal_responses = []
for e in ProgressBar()(xrange( len(signals))):
solver.test_nets[0].blobs['data'].data[...] = np.array(signals[e])
out = solver.test_nets[0].forward(start='ip1') #don't like out so far
#resp[0] = solver.test_nets[0].blobs['fc5'].data
resp1 = solver.test_nets[0].blobs['lp_fc8'].data[0][0]
resp2 = solver.test_nets[0].blobs['lp_fc8'].data[0][1]
signal_responses.append(resp2)
background_responses = []
for e in ProgressBar()(xrange( len(backgrounds))):
solver.test_nets[0].blobs['data'].data[...] = np.array(backgrounds[e])
out = solver.test_nets[0].forward(start='ip1') #don't like out so far
#resp[0] = solver.test_nets[0].blobs['fc5'].data
resp1 = solver.test_nets[0].blobs['lp_fc8'].data[0][0]
resp2 = solver.test_nets[0].blobs['lp_fc8'].data[0][1]
background_responses.append(resp2)
from matplotlib import pylab as plt
import matplotlib
matplotlib.rc_file("../lhcb-matplotlibrc/matplotlibrc")
fig,ax = plt.subplots(1,1,figsize=(12,12))
ax.hist(signal_responses,range=[0,1],alpha=0.5,color='blue')
ax.hist(background_responses,range=[0,1],alpha=0.5,color='red')
from sklearn.metrics import roc_curve, auc
resps = signal_responses+background_responses
one = [1 for s in signal_responses]
zero = [0 for b in background_responses]
trues = one+zero
fpr, tpr,_ = roc_curve(trues,resps)
score = auc(fpr,tpr)
scores.append(score)
if i == 1:
from ROOT import TFile, TH1F
f = TFile("responses_"+str(m_or_d)+"_"+str(grl)+".root","recreate")
resps = TH1F("resps","resps",500,0,1)
for r in signal_responses:
resps.Fill(r)
f.WriteTObject(resps)
f.Close()
m = np.mean(np.array(scores))
s = np.std(np.array(scores))
from math import sqrt
print u"score is ",m,u"±",s/sqrt(20)
| pseyfert/transfer-learning | toy/apply_advanced.py | Python | mit | 3,608 |
import os
from env import (
DJANGO_DEV,
)
if DJANGO_DEV:
from dev import *
else:
from live import *
| ginabythebay/iddocs | mysite/settings/__init__.py | Python | apache-2.0 | 117 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import inspect
import os
import sys
import warnings
import decorator
import jsonschema
from oslo_config import cfg
from oslo_utils import encodeutils
import prettytable
import six
import sqlalchemy.exc
from rally.common.i18n import _
from rally.common import log as logging
from rally.common.plugin import discover
from rally.common.plugin import info
from rally.common import version
from rally import exceptions
CONFIG_SEARCH_PATHS = [sys.prefix + "/etc/rally", "~/.rally", "/etc/rally"]
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Some CLI-specific constants
MARGIN = 3
def find_config_files(path_list):
for path in path_list:
abspath = os.path.abspath(os.path.expanduser(path))
confname = abspath + "/rally.conf"
if os.path.isfile(confname):
return [confname]
return None
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: b, d
:param fn: the function to check
    :param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, "__self__", None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None,
table_label=None, print_header=True, print_border=True,
out=sys.stdout):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
:param table_label: Label to use as header for the whole table.
:param print_header: print table header.
:param print_border: print table border.
:param out: stream to write output to.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
raise ValueError(_("Field labels list %(labels)s has different number "
"of elements than fields list %(fields)s"),
{"labels": field_labels, "fields": fields})
if sortby_index is None:
kwargs = {}
else:
kwargs = {"sortby": field_labels[sortby_index]}
pt = prettytable.PrettyTable(field_labels)
pt.align = "l"
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(" ", "_")
else:
field_name = field.lower().replace(" ", "_")
data = getattr(o, field_name, "")
row.append(data)
pt.add_row(row)
if not print_border or not print_header:
pt.set_style(prettytable.PLAIN_COLUMNS)
pt.left_padding_width = 0
pt.right_padding_width = 1
table_body = pt.get_string(header=print_header,
border=print_border,
**kwargs) + "\n"
table_header = ""
if table_label:
table_width = table_body.index("\n")
table_header = make_table_header(table_label, table_width)
table_header += "\n"
if six.PY3:
if table_header:
out.write(encodeutils.safe_encode(table_header).decode())
out.write(encodeutils.safe_encode(table_body).decode())
else:
if table_header:
out.write(encodeutils.safe_encode(table_header))
out.write(encodeutils.safe_encode(table_body))
def make_table_header(table_label, table_width,
junction_char="+", horizontal_char="-",
vertical_char="|"):
"""Generalized way make a table header string.
:param table_label: label to print on header
:param table_width: total width of table
:param junction_char: character used where vertical and
horizontal lines meet.
:param horizontal_char: character used for horizontal lines.
:param vertical_char: character used for vertical lines.
    :returns: string
"""
if len(table_label) >= (table_width - 2):
raise ValueError(_("Table header %s is longer than total"
"width of the table."))
label_and_space_width = table_width - len(table_label) - 2
padding = 0 if label_and_space_width % 2 == 0 else 1
half_table_width = label_and_space_width // 2
left_spacing = (" " * half_table_width)
right_spacing = (" " * (half_table_width + padding))
border_line = "".join((junction_char,
(horizontal_char * (table_width - 2)),
junction_char,))
label_line = "".join((vertical_char,
left_spacing,
table_label,
right_spacing,
vertical_char,))
return "\n".join((border_line, label_line,))
def make_header(text, size=80, symbol="-"):
"""Unified way to make header message to CLI.
:param text: what text to write
:param size: Length of header decorative line
:param symbol: What symbol to use to create header
"""
header = symbol * size + "\n"
header += " %s\n" % text
header += symbol * size + "\n"
return header
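# For example (sketch), make_header("Results", size=10, symbol="=") yields:
#   ==========
#    Results
#   ==========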
def suppress_warnings(f):
f._suppress_warnings = True
return f
@decorator.decorator
def process_keystone_exc(f, *args, **kwargs):
from keystoneclient import exceptions as keystone_exc
try:
return f(*args, **kwargs)
except keystone_exc.Unauthorized as e:
print(_("User credentials are wrong! \n%s") % e)
return 1
except keystone_exc.AuthorizationFailure as e:
print(_("Failed to authorize! \n%s") % e)
return 1
except keystone_exc.ConnectionRefused as e:
print(_("Rally can't reach the Keystone service! \n%s") % e)
return 1
class CategoryParser(argparse.ArgumentParser):
"""Customized arguments parser
    We need this to override the hardcoded behavior: we want to print the
    item's help instead of 'error: too few arguments', and we don't want
    positional arguments to appear in the help message.
"""
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
# INFO(oanufriev) _action_groups[0] contains positional arguments.
for action_group in self._action_groups[1:]:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def error(self, message):
self.print_help(sys.stderr)
sys.exit(2)
def pretty_float_formatter(field, ndigits=None):
"""Create a formatter function for the given float field.
:param field: a float object attribute name to be formatted.
:param ndigits: The number of digits after decimal point after round.
If None, then no rounding will be done.
:returns: the formatter function
"""
def _formatter(obj):
value = getattr(obj, field)
if value is not None:
if ndigits is not None:
return round(value, ndigits)
else:
return value
else:
return "n/a"
return _formatter
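# Illustrative usage (hypothetical `stat` object with a float attribute):
#   fmt = pretty_float_formatter("duration", ndigits=2)
#   fmt(stat)  # -> e.g. 3.14, or "n/a" when stat.duration is None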
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault("args", []).insert(0, (args, kwargs))
return func
return _decorator
def alias(command_name):
"""Allow cli to use alias command name instead of function name.
:param command_name: desired command name
"""
def decorator(func):
func.alias = command_name
return func
return decorator
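# Sketch of how @alias combines with @args on a command method (the class
# and option names below are illustrative, not from this module):
#   class TaskCommands(object):
#       @alias("run")
#       @args("--task", dest="task", help="Path to the task file")
#       def start(self, task):
#           ...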
def deprecated_args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault("args", []).insert(0, (args, kwargs))
func.__dict__.setdefault("deprecated_args", [])
func.deprecated_args.append(args[0])
if "help" in kwargs.keys():
warn_message = "DEPRECATED!"
kwargs["help"] = " ".join([warn_message, kwargs["help"]])
return func
return _decorator
def _methods_of(cls):
"""Get all callable methods of a class that don't start with underscore.
:returns: a list of tuples of the form (method_name, method)
"""
# The idea of unbound methods exists in Python 2 and was removed in
# Python 3, so "inspect.ismethod" is used here for Python 2 and
# "inspect.isfunction" for Python 3.
all_methods = inspect.getmembers(
cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))
methods = [m for m in all_methods if not m[0].startswith("_")]
return methods
def _compose_category_description(category):
descr_pairs = _methods_of(category)
description = ""
doc = category.__doc__
if doc:
description = doc.strip()
if descr_pairs:
description += "\n\nCommands:\n"
sublen = lambda item: len(item[0])
first_column_len = max(map(sublen, descr_pairs)) + MARGIN
for item in descr_pairs:
name = getattr(item[1], "alias", item[0])
if item[1].__doc__:
doc = info.parse_docstring(
item[1].__doc__)["short_description"]
else:
doc = ""
name += " " * (first_column_len - len(name))
description += " %s%s\n" % (name, doc)
return description
def _compose_action_description(action_fn):
description = ""
if action_fn.__doc__:
parsed_doc = info.parse_docstring(action_fn.__doc__)
short = parsed_doc.get("short_description")
long = parsed_doc.get("long_description")
description = "%s\n\n%s" % (short, long) if long else short
return description
def _add_command_parsers(categories, subparsers):
    # INFO(oanufriev) This monkey patching makes our custom parser class be
    # used instead of the native one. It affects all subparsers below the
    # 'subparsers' parameter of this function (categories and actions).
subparsers._parser_class = CategoryParser
parser = subparsers.add_parser("version")
parser = subparsers.add_parser("bash-completion")
parser.add_argument("query_category", nargs="?")
for category in categories:
command_object = categories[category]()
descr = _compose_category_description(categories[category])
parser = subparsers.add_parser(
category, description=descr,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest="action")
for method_name, method in _methods_of(command_object):
descr = _compose_action_description(method)
parser = category_subparsers.add_parser(
getattr(method, "alias", method_name),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=descr, help=descr)
action_kwargs = []
for args, kwargs in getattr(method, "args", []):
# FIXME(markmc): hack to assume dest is the arg name without
# the leading hyphens if no dest is supplied
kwargs.setdefault("dest", args[0][2:])
action_kwargs.append(kwargs["dest"])
kwargs["dest"] = "action_kwarg_" + kwargs["dest"]
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=method)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument("action_args", nargs="*")
def validate_deprecated_args(argv, fn):
if (len(argv) > 3
and (argv[2] == fn.__name__)
and getattr(fn, "deprecated_args", None)):
for item in fn.deprecated_args:
if item in argv[3:]:
LOG.warning("Deprecated argument %s for %s." % (item,
fn.__name__))
def run(argv, categories):
parser = lambda subparsers: _add_command_parsers(categories, subparsers)
category_opt = cfg.SubCommandOpt("category",
title="Command categories",
help="Available categories",
handler=parser)
CONF.register_cli_opt(category_opt)
help_msg = ("Additional custom plugin locations. Multiple files or "
"directories may be specified. All plugins in the specified"
" directories and subdirectories will be imported. Plugins in"
" /opt/rally/plugins and ~/.rally/plugins will always be "
"imported.")
CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
default=os.environ.get(
"RALLY_PLUGIN_PATHS"),
help=help_msg))
try:
CONF(argv[1:], project="rally", version=version.version_string(),
default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
logging.setup("rally")
if not CONF.get("log_config_append"):
            # The lines below silence noisy INFO logging from the requests
            # module. Ideally this would be configured on the root rally
            # logger, but the current oslo code does not expose such an
            # interface, so we fall back to this workaround whenever the
            # user has not supplied a specific log configuration. It can be
            # removed once oslo.log grows that interface.
            LOG.debug("INFO logs from the urllib3 and requests modules are hidden.")
requests_log = logging.getLogger("requests").logger
requests_log.setLevel(logging.WARNING)
urllib3_log = logging.getLogger("urllib3").logger
urllib3_log.setLevel(logging.WARNING)
# NOTE(wtakase): This is for suppressing boto error logging.
LOG.debug("ERROR log from boto module is hide.")
boto_log = logging.getLogger("boto").logger
boto_log.setLevel(logging.CRITICAL)
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
except Exception:
print(_("sudo failed, continuing as if nothing happened"))
print(_("Please re-run %s as root.") % argv[0])
return(2)
if CONF.category.name == "version":
print(version.version_string())
return(0)
if CONF.category.name == "bash-completion":
print(_generate_bash_completion_script())
return(0)
fn = CONF.category.action_fn
fn_args = [encodeutils.safe_decode(arg)
for arg in CONF.category.action_args]
fn_kwargs = {}
for k in CONF.category.action_kwargs:
v = getattr(CONF.category, "action_kwarg_" + k)
if v is None:
continue
if isinstance(v, six.string_types):
v = encodeutils.safe_decode(v)
fn_kwargs[k] = v
# call the action with the remaining arguments
# check arguments
try:
validate_args(fn, *fn_args, **fn_kwargs)
except MissingArgs as e:
# NOTE(mikal): this isn't the most helpful error message ever. It is
# long, and tells you a lot of things you probably don't want to know
# if you just got a single arg wrong.
print(fn.__doc__)
CONF.print_help()
print("Missing arguments:")
for missing in e.missing:
for arg in fn.args:
if arg[1].get("dest", "").endswith(missing):
print(" " + arg[0][0])
break
return(1)
try:
for path in CONF.plugin_paths or []:
discover.load_plugins(path)
validate_deprecated_args(argv, fn)
if getattr(fn, "_suppress_warnings", False):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ret = fn(*fn_args, **fn_kwargs)
else:
ret = fn(*fn_args, **fn_kwargs)
return(ret)
except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
exceptions.TaskNotFound, jsonschema.ValidationError) as e:
if logging.is_debug():
LOG.exception(e)
print(e)
return 1
except sqlalchemy.exc.OperationalError as e:
if logging.is_debug():
LOG.exception(e)
print(e)
print("Looks like Rally can't connect to it's DB.")
print("Make a sure that connection string in rally.conf is proper:")
print(CONF.database.connection)
return 1
except Exception:
print(_("Command failed, please check log for more info"))
raise
def _generate_bash_completion_script():
from rally.cli import main
bash_data = """#!/bin/bash
# Standalone _filedir() alternative.
# This exempts from dependence of bash completion routines
function _rally_filedir()
{
test "${1}" \\
&& COMPREPLY=( \\
$(compgen -f -- "${cur}" | grep -E "${1}") \\
$(compgen -o plusdirs -- "${cur}") ) \\
|| COMPREPLY=( \\
$(compgen -o plusdirs -f -- "${cur}") \\
$(compgen -d -- "${cur}") )
}
_rally()
{
declare -A SUBCOMMANDS
declare -A OPTS
%(data)s
for OPT in ${!OPTS[*]} ; do
CMD=${OPT%%%%_*}
CMDSUB=${OPT#*_}
SUBCOMMANDS[${CMD}]+="${CMDSUB} "
done
COMMANDS="${!SUBCOMMANDS[*]}"
COMPREPLY=()
local cur="${COMP_WORDS[COMP_CWORD]}"
local prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ $cur =~ ^(\.|\~|\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then
_rally_filedir
elif [[ $prev =~ ^--(task|filename)$ ]] ; then
_rally_filedir "\\.json|\\.yaml|\\.yml"
elif [ $COMP_CWORD == "1" ] ; then
COMPREPLY=($(compgen -W "$COMMANDS" -- ${cur}))
elif [ $COMP_CWORD == "2" ] ; then
COMPREPLY=($(compgen -W "${SUBCOMMANDS[${prev}]}" -- ${cur}))
else
COMMAND="${COMP_WORDS[1]}_${COMP_WORDS[2]}"
COMPREPLY=($(compgen -W "${OPTS[$COMMAND]}" -- ${cur}))
fi
return 0
}
complete -o filenames -F _rally rally
"""
completion = []
for category, cmds in main.categories.items():
for name, command in _methods_of(cmds):
command_name = getattr(command, "alias", name)
args_list = []
for arg in getattr(command, "args", []):
if getattr(command, "deprecated_args", []):
if arg[0][0] not in command.deprecated_args:
args_list.append(arg[0][0])
else:
args_list.append(arg[0][0])
args = " ".join(args_list)
completion.append(""" OPTS["{cat}_{cmd}"]="{args}"\n""".format(
cat=category, cmd=command_name, args=args))
return bash_data % {"data": "".join(sorted(completion))}
| vishnu-kumar/PeformanceFramework | rally_os/cli/cliutils.py | Python | apache-2.0 | 21,850 |
from Tkinter import *
import tkFileDialog as tkfd
import os
class Picker:
def __init__(self, app, parent, dir_type):
self.directorylist = []
self.shortnames = []
self.initdir = None
if os.path.isfile(app.config):
with open(app.config) as e:
for line in e:
splitline = line.split("=")
if splitline[0]=="initdir" and len(splitline[1].strip())!=0:
                        self.initdir=splitline[1].strip()
self.parent= parent
self.dir_type = dir_type
self.dp_frame = Frame(parent.sel_frame, height=175, width=400, bd=5, relief=RAISED)
self.dp_frame.pack_propagate(False)
self.dp_frame.pack()
if self.dir_type=="target":
self.pd_btn = Button(self.dp_frame, text="Pick target directory", command=self.pick_directory)
else:
self.pd_btn = Button(self.dp_frame, text="Pick source directory", command=self.pick_directory)
self.pd_btn.pack()
self.pd_choices = {}
self.pd_display = Frame(self.dp_frame, height=100, width=400)
self.pd_display.pack_propagate(False)
self.pd_display.pack()
if self.dir_type=="target":
self.pd_choices_label = Label(self.pd_display, text="Selected targets:")
else:
self.pd_choices_label = Label(self.pd_display, text="Selected sources:")
self.pd_choices_label.pack()
self.pd_choices_bar =Scrollbar(self.pd_display)
self.pd_choices_list = Listbox(self.pd_display, selectmode=EXTENDED, height=100, width=384, yscrollcommand=self.pd_choices_bar.set)
self.pd_choices_list.pack()
self.pd_choices_bar.config(command=self.pd_choices_list.yview)
self.remove_btn = Button(self.dp_frame, text="Remove selected", command=self.remove_dirs, state=DISABLED)
self.remove_btn.pack()
def pick_directory(self):
picked = tkfd.askdirectory(initialdir=self.initdir)
        if picked and picked not in self.directorylist:
self.directorylist.append(picked)
if self.dir_type=="target":
self.parent.targetlist.append(picked)
else:
self.parent.sourcelist.append(picked)
dir_length = len(picked)
if dir_length>50:
picked_split = picked.split('/')
picked_split = picked_split[1:]
picked_split[0] = '/'+picked_split[0]
if len(".../"+picked_split[-1])>50:
fpicked=".../"+picked_split[-1]
fpicked = fpicked[0:50]
elif (len(picked_split[0]+"/"+picked_split[-1]))>50:
fpicked=".../"+picked_split[-1]
else:
                    prefix = picked_split[0]+"/.../"
                    suffix = picked_split[-1]
                    for dir in reversed(picked_split[1:-1]):
                        if len(prefix+dir+'/'+suffix)<50:
                            suffix=dir+'/'+suffix
                        else:
                            break
                    fpicked = prefix+suffix
else:
fpicked = picked
self.pd_choices_list.insert(END, fpicked)
self.shortnames.append(fpicked)
if self.dir_type=="target":
self.parent.shorttargets.append(fpicked)
else:
self.parent.shortsources.append(fpicked)
self.remove_btn.config(state=NORMAL)
if len(self.parent.targetlist)>0 and len(self.parent.sourcelist)>0:
self.parent.start_btn.config(state=NORMAL)
if len(self.directorylist)>4:
self.pd_choices_list.pack_forget()
self.remove_btn.pack_forget()
self.pd_choices_bar.pack(side=RIGHT, fill=Y)
self.pd_choices_list.pack()
self.remove_btn.pack()
def remove_dirs(self):
to_delete = list(self.pd_choices_list.curselection())
while len(to_delete)>0:
index=to_delete.pop()
self.directorylist.pop(int(index))
self.shortnames.pop(int(index))
if self.dir_type=="target":
self.parent.targetlist.pop(int(index))
self.parent.shorttargets.pop(int(index))
else:
self.parent.sourcelist.pop(int(index))
self.parent.shortsources.pop(int(index))
self.pd_choices_list.delete(index)
to_delete = list(self.pd_choices_list.curselection())
if len(self.directorylist)<=4:
self.pd_choices_list.pack_forget()
self.remove_btn.pack_forget()
self.pd_choices_bar.pack_forget()
self.pd_choices_list.config(width=400)
self.pd_choices_list.pack()
self.remove_btn.pack()
if len(self.directorylist)==0:
self.remove_btn.config(state=DISABLED)
if len(self.parent.targetlist)==0 or len(self.parent.sourcelist)==0:
self.parent.start_btn.config(state=DISABLED)
def update_grid(self):
if self.dir_type=="target":
self.dp_frame.pack_forget()
else:
self.pd_display.config(height=175)
self.pd_choices_list.config(state=DISABLED)
self.pd_btn.pack_forget()
self.remove_btn.pack_forget() | TApicella/photosorter | picker_widget.py | Python | mit | 4,355 |
from __future__ import absolute_import
from . import envs
from . import run
| Abugbilla/wallpaperpy | wallpaperpy/__init__.py | Python | mit | 76 |
"""
Copyright 2012 Jan Demter <[email protected]>
This file is part of LODStats.
LODStats is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
LODStats is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LODStats. If not, see <http://www.gnu.org/licenses/>.
"""
from os.path import dirname
from .ParsedVocabulary import ParsedVocabulary
class RDFSyntax(ParsedVocabulary):
"""count usage of everything that #isDefinedBy some vocabulary"""
def __init__(self, results):
model = dirname(__file__) + "/../rdf/22-rdf-syntax-ns.rdf"
super(RDFSyntax, self).__init__(results, model)
| jandemter/lodstats | lodstats/stats/RDFSyntax.py | Python | gpl-3.0 | 1,025 |
import base64
import iso8601
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_bytes, force_str
from lepo.apidef.parameter.base import NO_VALUE
from lepo.excs import ErroneousParameters, MissingParameter
def cast_primitive_value(type, format, value):
if type == 'boolean':
return (force_str(value).lower() in ('1', 'yes', 'true'))
if type == 'integer' or format in ('integer', 'long'):
return int(value)
if type == 'number' or format in ('float', 'double'):
return float(value)
if format == 'byte': # base64 encoded characters
return base64.b64decode(value)
if format == 'binary': # any sequence of octets
return force_bytes(value)
if format == 'date': # ISO8601 date
return iso8601.parse_date(value).date()
if format == 'dateTime': # ISO8601 datetime
return iso8601.parse_date(value)
if type == 'string':
return force_str(value)
return value
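# Illustrative casts (a sketch, assuming string inputs as they arrive from
# a query string or path parameter):
#   cast_primitive_value('integer', None, '42')       -> 42
#   cast_primitive_value(None, 'date', '2020-01-02')  -> datetime.date(2020, 1, 2)
#   cast_primitive_value('boolean', None, 'Yes')      -> True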
def read_parameters(request, view_kwargs=None, capture_errors=False): # noqa: C901
"""
:param request: HttpRequest with attached api_info
:type request: HttpRequest
:type view_kwargs: dict[str, object]
:type capture_errors: bool
:rtype: dict[str, object]
"""
if view_kwargs is None:
view_kwargs = {}
params = {}
errors = {}
for param in request.api_info.operation.parameters:
try:
value = param.get_value(request, view_kwargs)
if value is NO_VALUE:
if param.has_default:
params[param.name] = param.default
elif param.required: # Required but missing
errors[param.name] = MissingParameter(f'parameter {param.name} is required but missing')
continue # No value, or a default was added, or an error was added.
params[param.name] = param.cast(request.api_info.api, value)
except (NotImplementedError, ImproperlyConfigured):
raise
except Exception as e:
if not capture_errors:
raise
errors[param.name] = e
if errors:
raise ErroneousParameters(errors, params)
return params
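# Sketch of a caller (hypothetical view; lepo attaches `request.api_info`
# during routing, and how the caller reports ErroneousParameters is an
# assumption here):
#   try:
#       params = read_parameters(request, view_kwargs, capture_errors=True)
#   except ErroneousParameters as exc:
#       ...  # report the per-parameter errors collected above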
| akx/lepo | lepo/parameter_utils.py | Python | mit | 2,238 |
# Run this on the mergedtags.txt file generated by merge_script.py
from mrjob.job import MRJob
fh = open("tagsMP.txt","w+")
count = 0
class MRmyjob(MRJob):
def mapper(self, _, line):
global count
wordlist = line.split(" ")
count += 1
for word in wordlist:
yield word, count
def reducer(self, key, list_of_values):
comb = key.encode('utf-8') + "\t" + str(list(list_of_values)).strip("[]") + "\n"
print comb
fh.write(comb)
yield key,list_of_values
if __name__ == '__main__':
MRmyjob.run()
fh.close()
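# Typical invocation (a sketch): python map_reduce_tags.py mergedtags.txt
# Each line the reducer writes to tagsMP.txt then looks like: tag<TAB>3, 17, 42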
| rayxke/Redshift-Project | data/map_reduce_tags.py | Python | mit | 548 |
def f():
x1: int = <warning descr="Expected type 'int', got 'str' instead">'foo'</warning>
x2: str = 'bar'
x3: int = 0
x4: str = <warning descr="Expected type 'str', got 'int' instead">1</warning>
| siosio/intellij-community | python/testData/inspections/PyTypeCheckerInspection/Assignment.py | Python | apache-2.0 | 213 |
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from qgis.PyQt.QtCore import Qt, QByteArray, QCoreApplication, QFile, QSize
from qgis.PyQt.QtWidgets import QDialog, QMenu, QShortcut, QApplication
from qgis.PyQt.QtGui import QColor, QKeySequence, QFont, QFontMetrics, QStandardItemModel, QStandardItem, QClipboard, \
QFontDatabase
from qgis.PyQt.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs
import sys
import os
import code
import codecs
import re
import traceback
from qgis.core import QgsApplication, QgsSettings, Qgis
from .ui_console_history_dlg import Ui_HistoryDialogPythonConsole
_init_commands = ["import sys", "import os", "import re", "import math", "from qgis.core import *",
"from qgis.gui import *", "from qgis.analysis import *", "from qgis._3d import *",
"import processing", "import qgis.utils",
"from qgis.utils import iface", "from qgis.PyQt.QtCore import *", "from qgis.PyQt.QtGui import *",
"from qgis.PyQt.QtWidgets import *",
"from qgis.PyQt.QtNetwork import *", "from qgis.PyQt.QtXml import *"]
_historyFile = os.path.join(QgsApplication.qgisSettingsDirPath(), "console_history.txt")
class ShellScintilla(QsciScintilla, code.InteractiveInterpreter):
DEFAULT_COLOR = "#4d4d4c"
KEYWORD_COLOR = "#8959a8"
CLASS_COLOR = "#4271ae"
METHOD_COLOR = "#4271ae"
DECORATION_COLOR = "#3e999f"
NUMBER_COLOR = "#c82829"
COMMENT_COLOR = "#8e908c"
COMMENT_BLOCK_COLOR = "#8e908c"
BACKGROUND_COLOR = "#ffffff"
CURSOR_COLOR = "#636363"
CARET_LINE_COLOR = "#efefef"
SINGLE_QUOTE_COLOR = "#718c00"
DOUBLE_QUOTE_COLOR = "#718c00"
TRIPLE_SINGLE_QUOTE_COLOR = "#eab700"
TRIPLE_DOUBLE_QUOTE_COLOR = "#eab700"
MARGIN_BACKGROUND_COLOR = "#efefef"
MARGIN_FOREGROUND_COLOR = "#636363"
SELECTION_BACKGROUND_COLOR = "#d7d7d7"
SELECTION_FOREGROUND_COLOR = "#303030"
MATCHED_BRACE_BACKGROUND_COLOR = "#b7f907"
MATCHED_BRACE_FOREGROUND_COLOR = "#303030"
def __init__(self, parent=None):
super(ShellScintilla, self).__init__(parent)
code.InteractiveInterpreter.__init__(self, locals=None)
self.parent = parent
self.opening = ['(', '{', '[', "'", '"']
self.closing = [')', '}', ']', "'", '"']
self.settings = QgsSettings()
# Enable non-ascii chars for editor
self.setUtf8(True)
self.new_input_line = True
# Set the default font
font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
self.setFont(font)
self.setMarginsFont(font)
# Margin 0 is used for line numbers
self.setMarginWidth(0, 0)
self.setMarginWidth(1, 0)
self.setMarginWidth(2, 0)
self.setMarginsFont(font)
self.setMarginWidth(1, "00000")
self.setMarginType(1, 5)
self.setCaretLineVisible(False)
self.buffer = []
self.displayPrompt(False)
for line in _init_commands:
self.runsource(line)
self.history = []
self.softHistory = ['']
self.softHistoryIndex = 0
# Read history command file
self.readHistoryFile()
self.historyDlg = HistoryDialog(self)
# Brace matching: enable for a brace immediately before or after
# the current position
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
# Current line visible with special background color
self.setCaretWidth(2)
self.refreshSettingsShell()
# Don't want to see the horizontal scrollbar at all
# Use raw message to Scintilla here (all messages are documented
# here: http://www.scintilla.org/ScintillaDoc.html)
self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)
# not too small
# self.setMinimumSize(500, 300)
self.setWrapMode(QsciScintilla.WrapCharacter)
self.SendScintilla(QsciScintilla.SCI_EMPTYUNDOBUFFER)
# Disable command key
ctrl, shift = self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Z') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Y') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl + shift)
# New QShortcut = ctrl+space/ctrl+alt+space for Autocomplete
self.newShortcutCSS = QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_Space), self)
self.newShortcutCAS = QShortcut(QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_Space), self)
self.newShortcutCSS.setContext(Qt.WidgetShortcut)
self.newShortcutCAS.setContext(Qt.WidgetShortcut)
self.newShortcutCAS.activated.connect(self.autoCompleteKeyBinding)
self.newShortcutCSS.activated.connect(self.showHistory)
def _setMinimumHeight(self):
font = self.lexer.defaultFont(0)
fm = QFontMetrics(font)
self.setMinimumHeight(fm.height() + 10)
def refreshSettingsShell(self):
# Set Python lexer
self.setLexers()
threshold = self.settings.value("pythonConsole/autoCompThreshold", 2, type=int)
self.setAutoCompletionThreshold(threshold)
radioButtonSource = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI')
autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool)
if autoCompEnabled:
if radioButtonSource == 'fromDoc':
self.setAutoCompletionSource(self.AcsDocument)
elif radioButtonSource == 'fromAPI':
self.setAutoCompletionSource(self.AcsAPIs)
elif radioButtonSource == 'fromDocAPI':
self.setAutoCompletionSource(self.AcsAll)
else:
self.setAutoCompletionSource(self.AcsNone)
cursorColor = self.settings.value("pythonConsole/cursorColor", QColor(self.CURSOR_COLOR))
self.setCaretForegroundColor(cursorColor)
self.setSelectionForegroundColor(QColor(
self.settings.value("pythonConsole/selectionForegroundColor", QColor(self.SELECTION_FOREGROUND_COLOR))))
self.setSelectionBackgroundColor(QColor(
self.settings.value("pythonConsole/selectionBackgroundColor", QColor(self.SELECTION_BACKGROUND_COLOR))))
self.setMatchedBraceBackgroundColor(QColor(self.settings.value("pythonConsole/matchedBraceBackgroundColor",
QColor(self.MATCHED_BRACE_BACKGROUND_COLOR))))
self.setMatchedBraceForegroundColor(QColor(self.settings.value("pythonConsole/matchedBraceForegroundColor",
QColor(self.MATCHED_BRACE_FOREGROUND_COLOR))))
self.setMarginsBackgroundColor(QColor(self.settings.value("pythonConsole/paperBackgroundColor", QColor(self.BACKGROUND_COLOR))))
# Sets minimum height for input area based of font metric
self._setMinimumHeight()
def showHistory(self):
if not self.historyDlg.isVisible():
self.historyDlg.show()
self.historyDlg._reloadHistory()
self.historyDlg.activateWindow()
def autoCompleteKeyBinding(self):
radioButtonSource = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI')
autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool)
if autoCompEnabled:
if radioButtonSource == 'fromDoc':
self.autoCompleteFromDocument()
elif radioButtonSource == 'fromAPI':
self.autoCompleteFromAPIs()
elif radioButtonSource == 'fromDocAPI':
self.autoCompleteFromAll()
def commandConsole(self, commands):
if not self.is_cursor_on_last_line():
self.move_cursor_to_end()
for cmd in commands:
self.setText(cmd)
self.entered()
self.move_cursor_to_end()
self.setFocus()
def setLexers(self):
self.lexer = QsciLexerPython()
font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
loadFont = self.settings.value("pythonConsole/fontfamilytext")
if loadFont:
font.setFamily(loadFont)
fontSize = self.settings.value("pythonConsole/fontsize", type=int)
if fontSize:
font.setPointSize(fontSize)
self.lexer.setDefaultFont(font)
self.lexer.setDefaultColor(
QColor(self.settings.value("pythonConsole/defaultFontColor", QColor(self.DEFAULT_COLOR))))
self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentFontColor", QColor(self.COMMENT_COLOR))),
1)
self.lexer.setColor(QColor(self.settings.value("pythonConsole/numberFontColor", QColor(self.NUMBER_COLOR))), 2)
self.lexer.setColor(QColor(self.settings.value("pythonConsole/keywordFontColor", QColor(self.KEYWORD_COLOR))),
5)
self.lexer.setColor(QColor(self.settings.value("pythonConsole/classFontColor", QColor(self.CLASS_COLOR))), 8)
self.lexer.setColor(QColor(self.settings.value("pythonConsole/methodFontColor", QColor(self.METHOD_COLOR))), 9)
self.lexer.setColor(QColor(self.settings.value("pythonConsole/decorFontColor", QColor(self.DECORATION_COLOR))),
15)
self.lexer.setColor(
QColor(self.settings.value("pythonConsole/commentBlockFontColor", QColor(self.COMMENT_BLOCK_COLOR))), 12)
self.lexer.setColor(
QColor(self.settings.value("pythonConsole/singleQuoteFontColor", QColor(self.SINGLE_QUOTE_COLOR))), 4)
self.lexer.setColor(
QColor(self.settings.value("pythonConsole/doubleQuoteFontColor", QColor(self.DOUBLE_QUOTE_COLOR))), 3)
self.lexer.setColor(QColor(
self.settings.value("pythonConsole/tripleSingleQuoteFontColor", QColor(self.TRIPLE_SINGLE_QUOTE_COLOR))), 6)
self.lexer.setColor(QColor(
self.settings.value("pythonConsole/tripleDoubleQuoteFontColor", QColor(self.TRIPLE_DOUBLE_QUOTE_COLOR))), 7)
self.lexer.setColor(
QColor(self.settings.value("pythonConsole/defaultFontColorEditor", QColor(self.DEFAULT_COLOR))), 13)
self.lexer.setFont(font, 1)
self.lexer.setFont(font, 3)
self.lexer.setFont(font, 4)
self.lexer.setFont(font, QsciLexerPython.UnclosedString)
for style in range(0, 33):
paperColor = QColor(
self.settings.value("pythonConsole/paperBackgroundColor", QColor(self.BACKGROUND_COLOR)))
self.lexer.setPaper(paperColor, style)
self.api = QsciAPIs(self.lexer)
checkBoxAPI = self.settings.value("pythonConsole/preloadAPI", True, type=bool)
checkBoxPreparedAPI = self.settings.value("pythonConsole/usePreparedAPIFile", False, type=bool)
if checkBoxAPI:
pap = os.path.join(QgsApplication.pkgDataPath(), "python", "qsci_apis", "pyqgis.pap")
self.api.loadPrepared(pap)
elif checkBoxPreparedAPI:
self.api.loadPrepared(self.settings.value("pythonConsole/preparedAPIFile"))
else:
apiPath = self.settings.value("pythonConsole/userAPI", [])
for i in range(0, len(apiPath)):
self.api.load(apiPath[i])
self.api.prepare()
self.lexer.setAPIs(self.api)
self.setLexer(self.lexer)
# TODO: show completion list for file and directory
def getText(self):
""" Get the text as a unicode string. """
value = self.getBytes().decode('utf-8')
# print (value) printing can give an error because the console font
# may not have all unicode characters
return value
def getBytes(self):
""" Get the text as bytes (utf-8 encoded). This is how
the data is stored internally. """
len = self.SendScintilla(self.SCI_GETLENGTH) + 1
bb = QByteArray(len, '0')
self.SendScintilla(self.SCI_GETTEXT, len, bb)
return bytes(bb)[:-1]
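    # Note (sketch): SCI_GETLENGTH reports a byte count, not a character
    # count, which is why getText() decodes the raw buffer as UTF-8 in one
    # step instead of assembling it line by line.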
def getTextLength(self):
return self.SendScintilla(QsciScintilla.SCI_GETLENGTH)
def get_end_pos(self):
"""Return (line, index) position of the last character"""
line = self.lines() - 1
return line, len(self.text(line))
def is_cursor_at_start(self):
"""Return True if cursor is at the end of text"""
cline, cindex = self.getCursorPosition()
return cline == 0 and cindex == 0
def is_cursor_at_end(self):
"""Return True if cursor is at the end of text"""
cline, cindex = self.getCursorPosition()
return (cline, cindex) == self.get_end_pos()
def move_cursor_to_start(self):
"""Move cursor to start of text"""
self.setCursorPosition(0, 0)
self.ensureCursorVisible()
self.ensureLineVisible(0)
self.displayPrompt(False)
def move_cursor_to_end(self):
"""Move cursor to end of text"""
line, index = self.get_end_pos()
self.setCursorPosition(line, index)
self.ensureCursorVisible()
self.ensureLineVisible(line)
self.displayPrompt(False)
def is_cursor_on_last_line(self):
"""Return True if cursor is on the last line"""
cline, _ = self.getCursorPosition()
return cline == self.lines() - 1
def new_prompt(self, prompt):
"""
Print a new prompt and save its (line, index) position
"""
self.write(prompt, prompt=True)
# now we update our cursor giving end of prompt
line, index = self.getCursorPosition()
self.ensureCursorVisible()
self.ensureLineVisible(line)
def displayPrompt(self, more=False):
self.SendScintilla(QsciScintilla.SCI_MARGINSETTEXT, 0, str.encode("..." if more else ">>>"))
def syncSoftHistory(self):
self.softHistory = self.history[:]
self.softHistory.append('')
self.softHistoryIndex = len(self.softHistory) - 1
def updateSoftHistory(self):
self.softHistory[self.softHistoryIndex] = self.text()
def updateHistory(self, command, skipSoftHistory=False):
if isinstance(command, list):
for line in command:
self.history.append(line)
elif not command == "":
if len(self.history) <= 0 or \
command != self.history[-1]:
self.history.append(command)
if not skipSoftHistory:
self.syncSoftHistory()
def writeHistoryFile(self, fromCloseConsole=False):
ok = False
try:
wH = codecs.open(_historyFile, 'w', encoding='utf-8')
for s in self.history:
wH.write(s + '\n')
ok = True
except:
raise
wH.close()
if ok and not fromCloseConsole:
msgText = QCoreApplication.translate('PythonConsole',
'History saved successfully.')
self.parent.callWidgetMessageBar(msgText)
def readHistoryFile(self):
fileExist = QFile.exists(_historyFile)
if fileExist:
with codecs.open(_historyFile, 'r', encoding='utf-8') as rH:
for line in rH:
if line != "\n":
l = line.rstrip('\n')
self.updateHistory(l, True)
self.syncSoftHistory()
else:
return
def clearHistory(self, clearSession=False):
if clearSession:
self.history = []
self.syncSoftHistory()
msgText = QCoreApplication.translate('PythonConsole',
'Session and file history cleared successfully.')
self.parent.callWidgetMessageBar(msgText)
return
ok = False
try:
cH = codecs.open(_historyFile, 'w', encoding='utf-8')
ok = True
except:
raise
cH.close()
if ok:
msgText = QCoreApplication.translate('PythonConsole',
'History cleared successfully.')
self.parent.callWidgetMessageBar(msgText)
def clearHistorySession(self):
self.clearHistory(True)
def showPrevious(self):
if self.softHistoryIndex < len(self.softHistory) - 1 and self.softHistory:
self.softHistoryIndex += 1
self.setText(self.softHistory[self.softHistoryIndex])
self.move_cursor_to_end()
# self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def showNext(self):
if self.softHistoryIndex > 0 and self.softHistory:
self.softHistoryIndex -= 1
self.setText(self.softHistory[self.softHistoryIndex])
self.move_cursor_to_end()
# self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def keyPressEvent(self, e):
# update the live history
self.updateSoftHistory()
startLine, startPos, endLine, endPos = self.getSelection()
# handle invalid cursor position and multiline selections
if startLine < endLine:
# allow copying and selecting
if e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier):
if e.key() == Qt.Key_C:
# only catch and return from Ctrl-C here if there's a selection
if self.hasSelectedText():
QsciScintilla.keyPressEvent(self, e)
return
elif e.key() == Qt.Key_A:
QsciScintilla.keyPressEvent(self, e)
return
else:
return
# allow selection
if e.modifiers() & Qt.ShiftModifier:
if e.key() in (Qt.Key_Left, Qt.Key_Right, Qt.Key_Home, Qt.Key_End):
QsciScintilla.keyPressEvent(self, e)
return
# all other keystrokes get sent to the input line
self.move_cursor_to_end()
if e.modifiers() & (
Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_C and not self.hasSelectedText():
# keyboard interrupt
sys.stdout.fire_keyboard_interrupt = True
return
line, index = self.getCursorPosition()
cmd = self.text(line)
hasSelectedText = self.hasSelectedText()
if e.key() in (Qt.Key_Return, Qt.Key_Enter) and not self.isListActive():
self.entered()
elif e.key() in (Qt.Key_Left, Qt.Key_Home):
QsciScintilla.keyPressEvent(self, e)
elif e.key() in (Qt.Key_Backspace, Qt.Key_Delete):
QsciScintilla.keyPressEvent(self, e)
self.recolor()
elif (e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_V) or \
(e.modifiers() & Qt.ShiftModifier and e.key() == Qt.Key_Insert):
self.paste()
e.accept()
elif e.key() == Qt.Key_Down and not self.isListActive():
self.showPrevious()
elif e.key() == Qt.Key_Up and not self.isListActive():
self.showNext()
# TODO: press event for auto-completion file directory
else:
t = e.text()
self.autoCloseBracket = self.settings.value("pythonConsole/autoCloseBracket", False, type=bool)
self.autoImport = self.settings.value("pythonConsole/autoInsertionImport", True, type=bool)
# Close bracket automatically
if t in self.opening and self.autoCloseBracket:
i = self.opening.index(t)
if self.hasSelectedText() and startPos != 0:
selText = self.selectedText()
self.removeSelectedText()
self.insert(self.opening[i] + selText + self.closing[i])
self.setCursorPosition(endLine, endPos + 2)
return
elif t == '(' and (re.match(r'^[ \t]*def \w+$', cmd)
or re.match(r'^[ \t]*class \w+$', cmd)):
self.insert('):')
else:
self.insert(self.closing[i])
# FIXES #8392 (automatically removes the redundant char
# when autoclosing brackets option is enabled)
elif t in [')', ']', '}'] and self.autoCloseBracket:
try:
if cmd[index - 1] in self.opening and t == cmd[index]:
self.setCursorPosition(line, index + 1)
self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
except IndexError:
pass
elif t == ' ' and self.autoImport:
ptrn = r'^[ \t]*from [\w.]+$'
if re.match(ptrn, cmd):
self.insert(' import')
self.setCursorPosition(line, index + 7)
QsciScintilla.keyPressEvent(self, e)
if len(self.text(0)) == 0 or hasSelectedText:
self.displayPrompt(False)
def contextMenuEvent(self, e):
menu = QMenu(self)
subMenu = QMenu(menu)
titleHistoryMenu = QCoreApplication.translate("PythonConsole", "Command History")
subMenu.setTitle(titleHistoryMenu)
subMenu.addAction(
QCoreApplication.translate("PythonConsole", "Show"),
self.showHistory, 'Ctrl+Shift+SPACE')
subMenu.addAction(
QCoreApplication.translate("PythonConsole", "Clear File"),
self.clearHistory)
subMenu.addAction(
QCoreApplication.translate("PythonConsole", "Clear Session"),
self.clearHistorySession)
menu.addMenu(subMenu)
menu.addSeparator()
copyAction = menu.addAction(
QCoreApplication.translate("PythonConsole", "Copy"),
self.copy, QKeySequence.Copy)
pasteAction = menu.addAction(
QCoreApplication.translate("PythonConsole", "Paste"),
self.paste, QKeySequence.Paste)
copyAction.setEnabled(False)
pasteAction.setEnabled(False)
if self.hasSelectedText():
copyAction.setEnabled(True)
if QApplication.clipboard().text():
pasteAction.setEnabled(True)
menu.exec_(self.mapToGlobal(e.pos()))
def mousePressEvent(self, e):
"""
Re-implemented to handle the mouse press event.
e: the mouse press event (QMouseEvent)
"""
self.setFocus()
if e.button() == Qt.MidButton:
stringSel = QApplication.clipboard().text(QClipboard.Selection)
if not self.is_cursor_on_last_line():
self.move_cursor_to_end()
self.insertFromDropPaste(stringSel)
e.accept()
else:
QsciScintilla.mousePressEvent(self, e)
def paste(self):
"""
Method to display data from the clipboard.
XXX: It should reimplement the virtual QScintilla.paste method,
but it seems not used by QScintilla code.
"""
stringPaste = QApplication.clipboard().text()
if self.is_cursor_on_last_line():
if self.hasSelectedText():
self.removeSelectedText()
else:
self.move_cursor_to_end()
self.insertFromDropPaste(stringPaste)
# Drag and drop
def dropEvent(self, e):
if e.mimeData().hasText():
stringDrag = e.mimeData().text()
self.insertFromDropPaste(stringDrag)
self.setFocus()
e.setDropAction(Qt.CopyAction)
e.accept()
else:
QsciScintilla.dropEvent(self, e)
def insertFromDropPaste(self, textDP):
pasteList = textDP.splitlines()
if pasteList:
for line in pasteList[:-1]:
cleanLine = line.replace(">>> ", "").replace("... ", "")
self.insert(cleanLine)
self.move_cursor_to_end()
self.runCommand(self.currentCommand())
if pasteList[-1] != "":
line = pasteList[-1]
cleanLine = line.replace(">>> ", "").replace("... ", "")
curpos = self.getCursorPosition()
self.insert(cleanLine)
self.setCursorPosition(curpos[0], curpos[1] + len(cleanLine))
def insertTextFromFile(self, listOpenFile):
for line in listOpenFile[:-1]:
self.append(line)
self.move_cursor_to_end()
self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
self.runCommand(self.currentCommand())
self.append(listOpenFile[-1])
self.move_cursor_to_end()
self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def entered(self):
self.move_cursor_to_end()
self.runCommand(self.currentCommand())
self.setFocus()
self.move_cursor_to_end()
def currentCommand(self):
string = self.text()
cmd = string
return cmd
def runCommand(self, cmd):
self.writeCMD(cmd)
import webbrowser
self.updateHistory(cmd)
version = 'master' if 'master' in Qgis.QGIS_VERSION.lower() else re.findall(r'^\d.[0-9]*', Qgis.QGIS_VERSION)[0]
if cmd in ('_pyqgis', '_api', '_cookbook'):
if cmd == '_pyqgis':
webbrowser.open("https://qgis.org/pyqgis/{}".format(version))
elif cmd == '_api':
webbrowser.open("https://qgis.org/api/{}".format('' if version == 'master' else version))
elif cmd == '_cookbook':
webbrowser.open("https://docs.qgis.org/{}/en/docs/pyqgis_developer_cookbook/".format(
'testing' if version == 'master' else version))
more = False
else:
self.buffer.append(cmd)
src = "\n".join(self.buffer)
more = self.runsource(src)
if not more:
self.buffer = []
            # prevents commands spanning several lines from breaking the
            # console when they use an EOL different from '\n'
self.setText('')
self.move_cursor_to_end()
self.displayPrompt(more)
def write(self, txt):
sys.stderr.write(txt)
def writeCMD(self, txt):
if sys.stdout:
sys.stdout.fire_keyboard_interrupt = False
if len(txt) > 0:
getCmdString = self.text()
sys.stdout.write('>>> ' + txt + '\n')
def runsource(self, source, filename='<input>', symbol='single'):
if sys.stdout:
sys.stdout.fire_keyboard_interrupt = False
hook = sys.excepthook
try:
def excepthook(etype, value, tb):
self.write("".join(traceback.format_exception(etype, value, tb)))
sys.excepthook = excepthook
return super(ShellScintilla, self).runsource(source, filename, symbol)
finally:
sys.excepthook = hook
class HistoryDialog(QDialog, Ui_HistoryDialogPythonConsole):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.parent = parent
self.setWindowTitle(QCoreApplication.translate("PythonConsole",
"Python Console - Command History"))
self.listView.setToolTip(QCoreApplication.translate("PythonConsole",
"Double-click on item to execute"))
self.model = QStandardItemModel(self.listView)
self._reloadHistory()
self.deleteScut = QShortcut(QKeySequence(Qt.Key_Delete), self)
self.deleteScut.activated.connect(self._deleteItem)
self.listView.doubleClicked.connect(self._runHistory)
self.reloadHistory.clicked.connect(self._reloadHistory)
self.saveHistory.clicked.connect(self._saveHistory)
def _runHistory(self, item):
cmd = item.data(Qt.DisplayRole)
self.parent.runCommand(cmd)
def _saveHistory(self):
self.parent.writeHistoryFile(True)
def _reloadHistory(self):
self.model.clear()
for i in self.parent.history:
item = QStandardItem(i)
if sys.platform.startswith('win'):
item.setSizeHint(QSize(18, 18))
self.model.appendRow(item)
self.listView.setModel(self.model)
self.listView.scrollToBottom()
def _deleteItem(self):
itemsSelected = self.listView.selectionModel().selectedIndexes()
if itemsSelected:
item = itemsSelected[0].row()
# Remove item from the command history (just for the current session)
self.parent.history.pop(item)
self.parent.historyIndex -= 1
# Remove row from the command history dialog
self.model.removeRow(item)
| olivierdalang/QGIS | python/console/console_sci.py | Python | gpl-2.0 | 30,240 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['NoCycle'] , ['AR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_NoCycle_AR.py | Python | bsd-3-clause | 150 |
import pkgutil
import logging
from cifsdk.constants import LOG_FORMAT, RUNTIME_PATH, LOGLEVEL, VERSION
from argparse import ArgumentParser
import signal
import yaml
import os
def read_config(args):
options = {}
if os.path.isfile(args.config):
with open(args.config) as f:
config = yaml.safe_load(f)
if not config:
return options
if config.get('client'):
config = config['client']
f.close()
if not config:
print("Unable to read {} config file".format(args.config))
raise SystemExit
for k in config:
if not options.get(k):
options[k] = config[k]
else:
print("Unable to read {} config file".format(args.config))
raise SystemExit
return options
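# Expected YAML shape (a sketch; the keys shown are illustrative, and any
# top-level 'client' section is unwrapped before merging):
#   client:
#     remote: https://cif.example.org
#     token: 0123456789abcdef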
def get_argument_parser():
BasicArgs = ArgumentParser(add_help=False)
BasicArgs.add_argument('-d', '--debug', dest='debug', action="store_true")
BasicArgs.add_argument('-V', '--version', action='version', version=VERSION)
BasicArgs.add_argument(
"--runtime-path", help="specify the runtime path [default %(default)s]", default=RUNTIME_PATH
)
return ArgumentParser(parents=[BasicArgs], add_help=False)
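# Sketch of downstream use (names illustrative): because the base parser is
# built with add_help=False, callers can extend it without option clashes:
#   base = get_argument_parser()
#   p = ArgumentParser(description="cif", parents=[base])
#   p.add_argument("-q", "--search")
#   args = p.parse_args()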
def load_plugin(path, plugin):
p = None
for loader, modname, is_pkg in pkgutil.iter_modules([path]):
if modname == plugin or modname == 'z{}'.format(plugin):
p = loader.find_module(modname).load_module(modname)
p = p.Plugin
return p
def setup_logging(args):
loglevel = logging.getLevelName(LOGLEVEL)
if args.debug:
loglevel = logging.DEBUG
console = logging.StreamHandler()
logging.getLogger('').setLevel(loglevel)
console.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger('').addHandler(console)
def setup_signals(name):
logger = logging.getLogger(__name__)
def sigterm_handler(_signo, _stack_frame):
logger.info('SIGTERM Caught for {}, shutting down...'.format(name))
raise SystemExit
signal.signal(signal.SIGTERM, sigterm_handler)
def setup_runtime_path(path):
if not os.path.isdir(path):
os.mkdir(path)
| csirtgadgets/bearded-avenger-sdk-py | cifsdk/utils/__init__.py | Python | mpl-2.0 | 2,198 |
import random
def d6():
result = random.randint(1,6)
if result == 6:
result = 6 + random.randint(1,6)
print("The result is: " + str(result))
| tarzenda/pyrus | assn4/ex14.py | Python | bsd-2-clause | 162 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import importlib
from PyQt5.Qt import QToolButton
from calibre import prints
from calibre.customize.ui import all_edit_book_tool_plugins
from calibre.gui2.tweak_book import tprefs, current_container
from calibre.gui2.tweak_book.boss import get_boss
class Tool(object):
'''
.. module:: calibre.gui2.tweak_book.plugin.Tool
The base class for individual tools in an Edit Book plugin. Useful members include:
* ``self.plugin``: A reference to the :class:`calibre.customize.Plugin` object to which this tool belongs.
* self. :attr:`boss`
* self. :attr:`gui`
Methods that must be overridden in sub classes:
* :meth:`create_action`
* :meth:`register_shortcut`
'''
#: Set this to a unique name it will be used as a key
name = None
#: If True the user can choose to place this tool in the plugins toolbar
allowed_in_toolbar = True
#: If True the user can choose to place this tool in the plugins menu
allowed_in_menu = True
#: The popup mode for the menu (if any) of the toolbar button. Possible values are 'delayed', 'instant', 'button'
toolbar_button_popup_mode = 'delayed'
@property
def boss(self):
' The :class:`calibre.gui2.tweak_book.boss.Boss` object. Used to control the user interface. '
return get_boss()
@property
def gui(self):
' The main window of the user interface '
return self.boss.gui
@property
def current_container(self):
' Return the current :class:`calibre.ebooks.oeb.polish.container.Container` object that represents the book being edited. '
return current_container()
def register_shortcut(self, qaction, unique_name, default_keys=(), short_text=None, description=None, **extra_data):
'''
Register a keyboard shortcut that will trigger the specified ``qaction``. This keyboard shortcut
will become automatically customizable by the user in the Keyboard section of the editor preferences.
:param qaction: A QAction object, it will be triggered when the
configured key combination is pressed by the user.
:param unique_name: A unique name for this shortcut/action. It will be
used internally, it must not be shared by any other actions in this
plugin.
:param default_keys: A list of the default keyboard shortcuts. If not
specified no default shortcuts will be set. If the shortcuts specified
here conflict with either builtin shortcuts or shortcuts from user
configuration/other plugins, they will be ignored. In that case, users
will have to configure the shortcuts manually via Preferences. For example:
``default_keys=('Ctrl+J', 'F9')``.
:param short_text: An optional short description of this action. If not
specified the text from the QAction will be used.
:param description: An optional longer description of this action, it
will be used in the preferences entry for this shortcut.
'''
short_text = short_text or unicode(qaction.text()).replace('&&', '\0').replace('&', '').replace('\0', '&')
self.gui.keyboard.register_shortcut(
self.name + '_' + unique_name, short_text, default_keys=default_keys,
description=description or '', group=_('Plugins'))
def create_action(self, for_toolbar=True):
'''
Create a QAction that will be added to either the plugins toolbar or
the plugins menu depending on ``for_toolbar``. For example::
def create_action(self, for_toolbar=True):
ac = QAction(get_icons('myicon.png'), 'Do something')
if for_toolbar:
# We want the toolbar button to have a popup menu
menu = QMenu()
ac.setMenu(menu)
menu.addAction('Do something else')
subaction = menu.addAction('And another')
                # Register a keyboard shortcut for this toolbar action. Be
                # careful to do this for only one of the toolbar action or
                # the menu action, not both.
self.register_shortcut(ac, 'some-unique-name', default_keys=('Ctrl+K',))
return ac
.. seealso:: Method :meth:`register_shortcut`.
'''
raise NotImplementedError()
def load_plugin_tools(plugin):
try:
main = importlib.import_module(plugin.__class__.__module__+'.main')
except ImportError:
import traceback
traceback.print_stack()
else:
for x in vars(main).itervalues():
if isinstance(x, type) and x is not Tool and issubclass(x, Tool):
ans = x()
ans.plugin = plugin
yield ans
def plugin_action_sid(plugin, tool, for_toolbar=True):
return plugin.name + tool.name + ('toolbar' if for_toolbar else 'menu')
plugin_toolbar_actions = []
def create_plugin_action(plugin, tool, for_toolbar, actions=None, toolbar_actions=None, plugin_menu_actions=None):
try:
ac = tool.create_action(for_toolbar=for_toolbar)
if ac is None:
raise RuntimeError('create_action() failed to return an action')
except Exception:
prints('Failed to create action for tool:', tool.name)
import traceback
traceback.print_exc()
return
sid = plugin_action_sid(plugin, tool, for_toolbar)
if actions is not None and sid in actions:
prints('The %s tool from the %s plugin has a non unique name, ignoring' % (tool.name, plugin.name))
else:
if actions is not None:
actions[sid] = ac
ac.sid = sid
if for_toolbar:
if toolbar_actions is not None:
toolbar_actions[sid] = ac
plugin_toolbar_actions.append(ac)
ac.popup_mode = {'instant':QToolButton.InstantPopup, 'button':QToolButton.MenuButtonPopup}.get(
tool.toolbar_button_popup_mode, QToolButton.DelayedPopup)
else:
if plugin_menu_actions is not None:
plugin_menu_actions.append(ac)
return ac
_tool_memory = [] # Needed to prevent the tool object from being garbage collected
def create_plugin_actions(actions, toolbar_actions, plugin_menu_actions):
del _tool_memory[:]
del plugin_toolbar_actions[:]
for plugin in all_edit_book_tool_plugins():
for tool in load_plugin_tools(plugin):
_tool_memory.append(tool)
if tool.allowed_in_toolbar:
create_plugin_action(plugin, tool, True, actions, toolbar_actions, plugin_menu_actions)
if tool.allowed_in_menu:
create_plugin_action(plugin, tool, False, actions, toolbar_actions, plugin_menu_actions)
def install_plugin(plugin):
for tool in load_plugin_tools(plugin):
if tool.allowed_in_toolbar:
sid = plugin_action_sid(plugin, tool, True)
if sid not in tprefs['global_plugins_toolbar']:
tprefs['global_plugins_toolbar'] = tprefs['global_plugins_toolbar'] + [sid]
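# A minimal illustrative Tool subclass (a sketch, not part of calibre): a real
# plugin would define this in its main.py so load_plugin_tools() can discover
# it. The 'demo-tool' name, shortcut and action text below are made up.
class DemoTool(Tool):
    name = 'demo-tool'
    def create_action(self, for_toolbar=True):
        from PyQt5.Qt import QAction
        ac = QAction('Demo', self.gui)
        if not for_toolbar:
            # register the shortcut on only one of the two actions
            self.register_shortcut(ac, 'demo-trigger', default_keys=('Ctrl+Shift+D',))
        return ac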
| sharad/calibre | src/calibre/gui2/tweak_book/plugin.py | Python | gpl-3.0 | 7,426 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from adminplus.sites import AdminSitePlus
admin.site = AdminSitePlus()
admin.autodiscover()
urlpatterns = patterns('',
url(r'^map/', include('map.urls', namespace="map")),
# Admin site
url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
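# Usage note (illustrative): the 'map' namespace above lets templates and views
# reverse URLs by namespaced name; assuming map/urls.py defines a pattern named
# 'index', the lookup would be:
#
# from django.core.urlresolvers import reverse
# reverse('map:index')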
| letoosh/techfugees | techfugees/techfugees/urls.py | Python | bsd-3-clause | 635 |
import os
import sys
import copy
import AutoGemmParameters
import Common
import KernelParameters
def indent(il):
returnTabs = ""
for i in range(0, il):
returnTabs += " "
return returnTabs
def tileInRange( tileMin, tileMax, rangeMin, rangeMax):
if ( tileMax < 0 or (tileMax >= rangeMax and rangeMax>0) ) and tileMin <= rangeMin :
valid = True
else:
valid = False
#print "Range [%4ux%4u]: [%4u,%4u] is %s b/c" \
# % (rangeMin, rangeMax, tileMin, tileMax, "valid" if valid else "INVALID" )
#print "if ( %i<0 or (%u >= %u and %u>0) and %u <= %u" \
# %( tileMax, tileMax, rangeMax, rangeMax, tileMin, rangeMin )
return valid
################################################################################
# KSL - Kernel Selection Logic File
################################################################################
class KernelSelection:
##############################################################################
# KSL - default constructor
##############################################################################
def __init__( \
self, \
precisionList, \
orderList, \
transDict, \
betaList, \
unrollDict, \
kernelSelectionData):
self.incFileName = Common.getIncludePath() + "AutoGemmKernelSelection.h"
self.incFile = open(self.incFileName, "w")
self.incFile.write( Common.getAutoGemmHeader() )
self.kernelSelectionFileName = Common.getIncludePath() + "AutoGemmKernelSelection.cpp"
self.selectionFile = open(self.kernelSelectionFileName, "w")
self.selectionFile.write( Common.getAutoGemmHeader() )
self.inc = (
"#include <clBLAS.h>\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSources.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBinaries.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsSource.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsBinary.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmClKernels.h\"\n"
"\n"
"#define EXACT_MULTIPLES(MULTIPLE_STR) MULTIPLE_STR\n"
"\n"
"// kernel selection logic template\n"
"template<typename Precision>\n"
"void gemmSelectKernel(\n"
" clblasOrder order,\n"
" clblasTranspose transA,\n"
" clblasTranspose transB,\n"
" size_t M,\n"
" size_t N,\n"
" size_t K,\n"
" bool betaNonZero,\n"
" float optimalNumElementsPerWorkItem,\n"
" const char **tileKernelSource,\n"
" const char **rowKernelSource,\n"
" const char **colKernelSource,\n"
" const char **cornerKernelSource,\n"
" const char **sourceBuildOptions,\n"
" const unsigned char **tileKernelBinary,\n"
" const unsigned char **rowKernelBinary,\n"
" const unsigned char **colKernelBinary,\n"
" const unsigned char **cornerKernelBinary,\n"
" size_t **tileKernelBinarySize,\n"
" size_t **rowKernelBinarySize,\n"
" size_t **colKernelBinarySize,\n"
" size_t **cornerKernelBinarySize,\n"
" const char **binaryBuildOptions,\n"
" cl_kernel **tileClKernel,\n"
" cl_kernel **rowClKernel,\n"
" cl_kernel **colClKernel,\n"
" cl_kernel **cornerClKernel,\n"
" unsigned int *workGroupNumRows,\n"
" unsigned int *workGroupNumCols,\n"
" unsigned int *microTileNumRows,\n"
" unsigned int *microTileNumCols,\n"
" unsigned int *unroll\n"
");\n\n" )
self.logic = "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSelection.h\"\n"
####################################
# precision
kernel = KernelParameters.KernelParameters()
for precision in precisionList:
#self.selectionFile.write( self.logic )
#self.logic = ""
kernel.precision = precision
sizeEvents = kernelSelectionData[precision]
self.logic += (
"\n// " + precision + "gemm kernel selection logic\n"
"template<>\n"
"void gemmSelectKernel<" )
if precision == "s":
self.logic += "float"
elif precision == "d":
self.logic += "double"
elif precision == "c":
self.logic += "FloatComplex"
else:
self.logic += "DoubleComplex"
self.logic += (
">(\n"
" clblasOrder order,\n"
" clblasTranspose transA,\n"
" clblasTranspose transB,\n"
" size_t M,\n"
" size_t N,\n"
" size_t K,\n"
" bool betaNonZero,\n"
" float optimalNumElementsPerWorkItem,\n"
" const char **tileKernelSource,\n"
" const char **rowKernelSource,\n"
" const char **colKernelSource,\n"
" const char **cornerKernelSource,\n"
" const char **sourceBuildOptions,\n"
" const unsigned char **tileKernelBinary,\n"
" const unsigned char **rowKernelBinary,\n"
" const unsigned char **colKernelBinary,\n"
" const unsigned char **cornerKernelBinary,\n"
" size_t **tileKernelBinarySize,\n"
" size_t **rowKernelBinarySize,\n"
" size_t **colKernelBinarySize,\n"
" size_t **cornerKernelBinarySize,\n"
" const char **binaryBuildOptions,\n"
" cl_kernel **tileClKernel,\n"
" cl_kernel **rowClKernel,\n"
" cl_kernel **colClKernel,\n"
" cl_kernel **cornerClKernel,\n"
" unsigned int *workGroupNumRows,\n"
" unsigned int *workGroupNumCols,\n"
" unsigned int *microTileNumRows,\n"
" unsigned int *microTileNumCols,\n"
" unsigned int *unroll\n"
") {\n" )
####################################
# order
for order in orderList:
#print precision + "gemm" + "_" + order
kernel.order = order
self.logic += indent(1) + "if (order == " + order + ") {\n"
transList = transDict[precision]
####################################
# transA
for transA in transList:
#print precision + "gemm" + "_" + order + "_" + transA
kernel.transA = transA
self.logic += indent(2) + "if (transA == "
if transA == "N":
self.logic += "clblasNoTrans"
elif transA == "T":
self.logic += "clblasTrans"
else:
self.logic += "clblasConjTrans"
self.logic += ") {\n"
####################################
# transB
for transB in transList:
kernel.transB = transB
self.logic += indent(3) + "if (transB == "
if transB == "N":
self.logic += "clblasNoTrans"
elif transB == "T":
self.logic += "clblasTrans"
else:
self.logic += "clblasConjTrans"
self.logic += ") {\n"
####################################
# beta
for beta in betaList:
#print precision + "gemm" + "_" + order + "_" + transA + "_" + transB + "_B" + str(beta)
kernel.beta = beta
self.logic += indent(4) + "if ( "
if beta == 0:
self.logic += "!betaNonZero"
else:
self.logic += "betaNonZero"
self.logic += " ) {\n"
####################################
# if size event
for sizeEvent in sizeEvents:
self.selectionFile.write( self.logic )
self.logic = ""
sizeMin = sizeEvent[0]
fallbackTile = sizeEvent[1]
validTiles = sizeEvent[2]
self.logic += indent(5)+"if ( M*N >= "+str(sizeMin)+"*"+str(sizeMin) + ") {\n"
#print precision + "gemm" + "_" + order + "_" + transA + "_" + transB + "_B" + str(beta) + "_" + str(sizeMin) + "->" + str(sizeMax)
####################################
# valid tiles
self.logic += indent(6)+"// valid tiles\n"
for tileParams in validTiles:
kernel.workGroupNumRows = tileParams[0]
kernel.workGroupNumCols = tileParams[1]
kernel.microTileNumRows = tileParams[2]
kernel.microTileNumCols = tileParams[3]
kernel.macroTileNumRows = kernel.workGroupNumRows*kernel.microTileNumRows
kernel.macroTileNumCols = kernel.workGroupNumCols*kernel.microTileNumCols
for unroll in unrollDict[precision]:
kernel.unroll = unroll
self.logic += indent(6)+"if ( M%%%d == 0 && N%%%d == 0 && K%%%d == 0) {\n" \
% (kernel.getMultipleM(), kernel.getMultipleN(), kernel.getMultipleK())
self.addBodyForKernel( kernel )
self.logic += indent(6) + "}\n"
####################################
          # fallback tile - TODO: all tiles being added
self.logic += indent(6)+"// fallback tile\n"
#print "\nFallback[%i, %i]"%(sizeMin, sizeMax)
kernel.workGroupNumRows = fallbackTile[0]
kernel.workGroupNumCols = fallbackTile[1]
kernel.microTileNumRows = fallbackTile[2]
kernel.microTileNumCols = fallbackTile[3]
kernel.macroTileNumRows = kernel.workGroupNumRows*kernel.microTileNumRows
kernel.macroTileNumCols = kernel.workGroupNumCols*kernel.microTileNumCols
for unroll in unrollDict[precision]:
kernel.unroll = unroll
self.logic += indent(6)+"if ( K%%%d == 0 ) {\n" \
% (kernel.getMultipleK())
self.addBodyForKernel( kernel )
self.logic += indent(6) + "}\n"
####################################
# end size event
self.logic += indent(5) + "} // end size\n"
####################################
# end beta
self.logic += indent(4) + "} // end beta\n"
####################################
# end transB
self.logic += indent(3) + "} // end transB\n"
####################################
# end transA
self.logic += indent(2) + "} // end transA\n"
####################################
# end order
self.logic += indent(1) + "} // end order\n"
####################################
# end precision
self.logic += indent(0) + "} // end precision function\n"
# write last precision
self.selectionFile.write( self.logic )
self.selectionFile.write( "\n" )
def addBodyForKernel( self, kernel ):
#self.logic += indent(7) + "printf(\"selected kernel: " + kernel.getName() + "\\n\");\n"
self.logic += indent(7) + "*tileKernelSource = " + kernel.getName() + "_src;\n"
self.logic += indent(7) + "*rowKernelSource = " + kernel.getRowName() + "_src;\n"
self.logic += indent(7) + "*colKernelSource = " + kernel.getColName() + "_src;\n"
self.logic += indent(7) + "*cornerKernelSource = " + kernel.getCornerName() + "_src;\n"
self.logic += indent(7) + "*sourceBuildOptions = " + kernel.getName() + "_srcBuildOptions;\n"
self.logic += indent(7) + "*tileKernelBinary = " + kernel.getName() + "_bin;\n"
self.logic += indent(7) + "*rowKernelBinary = " + kernel.getRowName() + "_bin;\n"
self.logic += indent(7) + "*colKernelBinary = " + kernel.getColName() + "_bin;\n"
self.logic += indent(7) + "*cornerKernelBinary = " + kernel.getCornerName() + "_bin;\n"
self.logic += indent(7) + "*tileKernelBinarySize = &" + kernel.getName() + "_binSize;\n"
self.logic += indent(7) + "*rowKernelBinarySize = &" + kernel.getRowName() + "_binSize;\n"
self.logic += indent(7) + "*colKernelBinarySize = &" + kernel.getColName() + "_binSize;\n"
self.logic += indent(7) + "*cornerKernelBinarySize = &" + kernel.getCornerName() + "_binSize;\n"
self.logic += indent(7) + "*binaryBuildOptions = " + kernel.getName() + "_binBuildOptions;\n"
self.logic += indent(7) + "*tileClKernel = &" + kernel.getName() + "_clKernel;\n"
self.logic += indent(7) + "*rowClKernel = &" + kernel.getRowName() + "_clKernel;\n"
self.logic += indent(7) + "*colClKernel = &" + kernel.getColName() + "_clKernel;\n"
self.logic += indent(7) + "*cornerClKernel = &" + kernel.getCornerName() + "_clKernel;\n"
self.logic += indent(7) + "*workGroupNumRows = " + kernel.getName() + "_workGroupNumRows;\n"
self.logic += indent(7) + "*workGroupNumCols = " + kernel.getName() + "_workGroupNumCols;\n"
self.logic += indent(7) + "*microTileNumRows = " + kernel.getName() + "_microTileNumRows;\n"
self.logic += indent(7) + "*microTileNumCols = " + kernel.getName() + "_microTileNumRows;\n"
self.logic += indent(7) + "*unroll = " + kernel.getName() + "_unroll;\n"
self.logic += indent(7) + "return;\n"
##############################################################################
# KSL - write to file
##############################################################################
def writeToFile(self):
self.selectionFile.close()
self.incFile.write( self.inc )
self.incFile.close()
################################################################################
# KSM - Kernel Selection Manual/Specific File
################################################################################
class KernelSelectionSpecific:
zeroIndent = " "
tab = " "
##############################################################################
# KSL - default constructor
##############################################################################
def __init__(self):
self.incFileName = Common.getIncludePath() + "AutoGemmKernelSelectionSpecific.h"
self.incFile = open(self.incFileName, "w")
self.incFile.write( Common.getAutoGemmHeader() )
self.kernelSelectionFileName = Common.getIncludePath() + "AutoGemmKernelSelectionSpecific.cpp"
self.selectionFile = open(self.kernelSelectionFileName, "w")
self.selectionFile.write( Common.getAutoGemmHeader() )
self.inc = (
"#include <clBLAS.h>\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSources.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBinaries.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsSource.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsBinary.h\"\n"
"#include \"" + Common.getRelativeIncludePath() + "AutoGemmClKernels.h\"\n"
"\n"
"// kernel selection specific template\n"
"template<typename Precision>\n"
"bool gemmSelectKernelSpecific(\n"
" clblasOrder order,\n"
" clblasTranspose transA,\n"
" clblasTranspose transB,\n"
" bool betaNonZero,\n"
" unsigned int macroTileNumRows,\n"
" unsigned int macroTileNumCols,\n"
" unsigned int unroll,\n"
" const char **tileKernelSource,\n"
" const char **rowKernelSource,\n"
" const char **colKernelSource,\n"
" const char **cornerKernelSource,\n"
" const char **sourceBuildOptions,\n"
" const unsigned char **tileKernelBinary,\n"
" const unsigned char **rowKernelBinary,\n"
" const unsigned char **colKernelBinary,\n"
" const unsigned char **cornerKernelBinary,\n"
" size_t **tileKernelBinarySize,\n"
" size_t **rowKernelBinarySize,\n"
" size_t **colKernelBinarySize,\n"
" size_t **cornerKernelBinarySize,\n"
" const char **binaryBuildOptions,\n"
" cl_kernel **tileClKernel,\n"
" cl_kernel **rowClKernel,\n"
" cl_kernel **colClKernel,\n"
" cl_kernel **cornerClKernel,\n"
" unsigned int *workGroupNumRows,\n"
" unsigned int *workGroupNumCols,\n"
" unsigned int *microTileNumRows,\n"
" unsigned int *microTileNumCols\n"
");\n\n" )
self.logic = "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSelectionSpecific.h\"\n"
self.precisionInitialized = False
self.orderInitialized = False
self.transInitialized = False
self.betaInitialized = False
def newPrecision(self, precision ):
#print "KernelSelectionSpecific: " + precision + "gemm"
if self.precisionInitialized:
self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
self.logic += self.zeroIndent+"}\n"
self.logic += self.zeroIndent + "return false; // didn't find a match\n"
self.logic += "}\n\n"
else:
self.logic += self.zeroIndent
self.logic += (
"\n// " + precision + "gemm kernel selection specific\n"
"template<>\n"
"bool gemmSelectKernelSpecific<" )
if precision == "s":
self.logic += "float"
elif precision == "d":
self.logic += "double"
elif precision == "c":
self.logic += "FloatComplex"
else:
self.logic += "DoubleComplex"
self.logic += (
">(\n"
" clblasOrder order,\n"
" clblasTranspose transA,\n"
" clblasTranspose transB,\n"
" bool betaNonZero,\n"
" unsigned int macroTileNumRows,\n"
" unsigned int macroTileNumCols,\n"
" unsigned int unroll,\n"
" const char **tileKernelSource,\n"
" const char **rowKernelSource,\n"
" const char **colKernelSource,\n"
" const char **cornerKernelSource,\n"
" const char **sourceBuildOptions,\n"
" const unsigned char **tileKernelBinary,\n"
" const unsigned char **rowKernelBinary,\n"
" const unsigned char **colKernelBinary,\n"
" const unsigned char **cornerKernelBinary,\n"
" size_t **tileKernelBinarySize,\n"
" size_t **rowKernelBinarySize,\n"
" size_t **colKernelBinarySize,\n"
" size_t **cornerKernelBinarySize,\n"
" const char **binaryBuildOptions,\n"
" cl_kernel **tileClKernel,\n"
" cl_kernel **rowClKernel,\n"
" cl_kernel **colClKernel,\n"
" cl_kernel **cornerClKernel,\n"
" unsigned int *workGroupNumRows,\n"
" unsigned int *workGroupNumCols,\n"
" unsigned int *microTileNumRows,\n"
" unsigned int *microTileNumCols\n"
") {\n" )
self.precisionInitialized = True
self.orderInitialized = False
self.transInitialized = False
self.betaInitialized = False
####################################
# KSL - new order
def newOrder(self, order):
if (self.orderInitialized):
self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
self.logic += self.zeroIndent
self.logic += "} else "
else:
self.logic += self.zeroIndent
self.logic += "if (order == " + order + ") {\n"
self.orderInitialized = True
self.transInitialized = False
self.betaInitialized = False
####################################
# KSL - new trans
def newTrans(self, transA, transB):
if (self.transInitialized):
self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
self.logic += self.zeroIndent+self.tab # 1 tab
self.logic += "} else "
else:
self.logic += self.zeroIndent+self.tab # 1 tabs
self.logic += "if (transA == "
if transA == "N":
self.logic += "clblasNoTrans"
elif transA == "T":
self.logic += "clblasTrans"
else:
self.logic += "clblasConjTrans"
self.logic += " && transB == "
if transB == "N":
self.logic += "clblasNoTrans"
elif transB == "T":
self.logic += "clblasTrans"
else:
self.logic += "clblasConjTrans"
self.logic += ") {\n"
self.transInitialized = True
self.betaInitialized = False
####################################
# KSL - new beta
def newBeta(self, beta):
if (self.betaInitialized):
self.logic += self.zeroIndent+self.tab+self.tab # 2 tabs
self.logic += "} else "
else:
self.logic += self.zeroIndent+self.tab+self.tab # 2 tabs
self.logic += "if ( "
if beta == 0:
self.logic += "!betaNonZero"
else:
self.logic += "betaNonZero"
self.logic += " ) {\n"
self.betaInitialized = True
##############################################################################
# KSL - add new kernel
##############################################################################
def newKernel(self, kernel):
# new kernel
self.logic += self.zeroIndent+self.tab+self.tab+self.tab # 3 tabs
self.logic += ("if ( macroTileNumRows == %u && macroTileNumCols == %u "
"&& unroll == %u) {\n") \
% ( kernel.macroTileNumRows, kernel.macroTileNumCols, kernel.unroll )
#self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab+self.tab # 5 tabs
#self.logic += "printf(\"selected kernel: " + kernel.getName() + "\\n\");\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*tileKernelSource = " + kernel.getName() + "_src;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*rowKernelSource = " + kernel.getRowName() + "_src;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*colKernelSource = " + kernel.getColName() + "_src;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*cornerKernelSource = " + kernel.getCornerName() + "_src;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*sourceBuildOptions = " + kernel.getName() + "_srcBuildOptions;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*tileKernelBinary = " + kernel.getName() + "_bin;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*rowKernelBinary = " + kernel.getRowName() + "_bin;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*colKernelBinary = " + kernel.getColName() + "_bin;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*cornerKernelBinary = " + kernel.getCornerName() + "_bin;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*tileKernelBinarySize = &" + kernel.getName() + "_binSize;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*rowKernelBinarySize = &" + kernel.getRowName() + "_binSize;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*colKernelBinarySize = &" + kernel.getColName() + "_binSize;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*cornerKernelBinarySize = &" + kernel.getCornerName() + "_binSize;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*binaryBuildOptions = " + kernel.getName() + "_binBuildOptions;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*tileClKernel = &" + kernel.getName() + "_clKernel;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*rowClKernel = &" + kernel.getRowName() + "_clKernel;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*colClKernel = &" + kernel.getColName() + "_clKernel;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*cornerClKernel = &" + kernel.getCornerName() + "_clKernel;\n"
# dims
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*workGroupNumRows = " + kernel.getName() + "_workGroupNumRows;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*workGroupNumCols = " + kernel.getName() + "_workGroupNumCols;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*microTileNumRows = " + kernel.getName() + "_microTileNumRows;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "*microTileNumCols = " + kernel.getName() + "_microTileNumCols;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
self.logic += "return true;\n"
self.logic += self.zeroIndent+self.tab+self.tab+self.tab # 3 tabs
self.logic += "}\n"
self.selectionFile.write( self.logic )
self.logic = ""
##############################################################################
# KSL - write to file
##############################################################################
def writeToFile(self):
self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
self.logic += self.zeroIndent + "}\n" # 0 tab
self.logic += self.zeroIndent + "return false; // didn't find a match\n"
self.logic += "}\n" # close function
self.selectionFile.write(self.logic)
self.selectionFile.write("\n")
self.selectionFile.close()
self.incFile.write(self.inc)
self.incFile.write("\n")
self.incFile.close()
################################################################################
# Main
################################################################################
def writeKernelSelection():
print "AutoGemm.py: Generating kernel selection."
if not os.path.exists( Common.getIncludePath() ):
os.makedirs( Common.getIncludePath() )
########################################
# kernel selection specific
kss = KernelSelectionSpecific()
# for each precision
kernel = KernelParameters.KernelParameters()
for precision in AutoGemmParameters.precisions:
kernel.precision = precision
kss.newPrecision(precision)
# valid tiles for this precision
tiles = AutoGemmParameters.getTilesForPrecision(precision)
# for non tile parameters
for order in AutoGemmParameters.orders:
kernel.order = order
kss.newOrder(order)
for transA in AutoGemmParameters.transposes[precision]:
kernel.transA = transA
for transB in AutoGemmParameters.transposes[precision]:
kernel.transB = transB
kss.newTrans(transA, transB)
for beta in AutoGemmParameters.betas:
kernel.beta = beta
kss.newBeta(beta)
# for tile parameters
for tile in tiles:
kernel.useTile(tile)
kss.newKernel(kernel)
kss.writeToFile()
########################################
# kernel selection
ks = KernelSelection( \
AutoGemmParameters.precisions, \
AutoGemmParameters.orders, \
AutoGemmParameters.transposes, \
AutoGemmParameters.betas, \
AutoGemmParameters.unrolls, \
AutoGemmParameters.kernelSelectionData )
ks.writeToFile()
################################################################################
# Main
################################################################################
if __name__ == "__main__":
if len(sys.argv) == 2:
Common.setOutputPath(sys.argv[1])
else:
print "Warning: No output path specified; default is working directory."
writeKernelSelection()
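# Usage sketch (not part of the generator): run standalone from the AutoGemm
# directory so that AutoGemmParameters, Common and KernelParameters import:
#
#   python KernelSelection.py /path/to/output
#
# This writes the AutoGemmKernelSelection and AutoGemmKernelSelectionSpecific
# header/source pairs below Common.getIncludePath().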
| ghisvail/clBLAS | src/library/blas/AutoGemm/KernelSelection.py | Python | apache-2.0 | 28,193 |
def file_from_tar(context, tar, member):
    # Read a member's bytes straight out of an open tar archive
    return tar.extractfile(member).read()
def static_file(context, path):
    # Read a file from disk and return its bytes
    with open(path, 'rb') as fobj:
        return fobj.read()
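# Illustrative usage (a sketch; the archive and member names are made up).
# Both helpers return raw bytes for a view to serve:
#
# import tarfile
# tar = tarfile.open('site.tar.gz')
# html = file_from_tar(None, tar, 'index.html')  # 'context' is unused here
# css = static_file(None, 'static/style.css')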
| akheron/stango | stango/views.py | Python | mit | 178 |
# -*- coding: utf-8 -*-
#
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from typing import Dict
from typing import List
from typing import Union
from typing import Optional
from kivy import Logger
from ORCA.scripts.BaseScript import cBaseScript
from ORCA.vars.Replace import ReplaceVars
from ORCA.utils.FileName import cFileName
from ORCA.utils.Path import cPath
from ORCA.utils.TypeConvert import ToBool
import ORCA.Globals as Globals
'''
<root>
<repositorymanager>
<entry>
<name>TV Logos Helper Script</name>
<description language='English'>Helper Scripts to find TV Logos</description>
<description language='German'>Hilfs Skript um TV-Logos zu finden</description>
<author>Carsten Thielepape</author>
<version>5.0.4</version>
<minorcaversion>5.0.4</minorcaversion>
<skip>0</skip>
<sources>
<source>
<local>$var(APPLICATIONPATH)/scripts/helper/helper_gettvlogo</local>
<sourcefile>$var(REPOSITORYWWWPATH)/scripts/helper_gettvlogo.zip</sourcefile>
<targetpath>scripts/helper</targetpath>
</source>
</sources>
<skipfiles>
</skipfiles>
</entry>
</repositorymanager>
</root>
'''
typeReference = Dict[str,str]
typeReferences = Dict[str,typeReference]
class cLogoItem:
"""
Helper class to represent a logo item
"""
def __init__(self,*,uLogoKey:str,uFnLogo:str):
self.uLogoKey:str = uLogoKey
self.uFnLogo:str = uFnLogo
class cLogosForFolder(Dict[str,cLogoItem]):
"""
Helper class to represent a dictionary of logo items
"""
pass
class cScript(cBaseScript):
"""
WikiDoc:Doc
WikiDoc:Context:Scripts
WikiDoc:Page:Scripts-helper_tvlogos
WikiDoc:TOCTitle:Helper Script TV-Logos
= TV-Logos Helper =
This is a helper script for Enigma commands
<div style="overflow:auto; ">
{| class="wikitable"
! align="left" | Attribute
! align="left" | Description
|-
|cmd_type
|The requested helper function: can be "get_tvlogofile","normalize_reference" or "normalize_channelname"
|-
|reference
|for "normalize_reference": The reference string to be normalized, for "get_tvlogofile" the reference to find
|-
|channelname
|for "normalize_channelname": The channelname string to be normalized, for "get_tvlogofile" the name to find
|-
|removehd
|for "normalize_channelname": boolean flag if HD, UHD / SD flags should be removed
|-
|logopackfoldername
|For "get_tvlogofile": the name of the folder (only the folder name, not the fullpath) in "recources" , below "tvlogos"
|}</div>
Remarks:
"get_tvlogofile": Return a logo filename for the given reference or channelname
"get_channelreference": Will return the channel reference for a channel number
"assign_channels": Sets the TV Logos for the parsed channels
WikiDoc:End
"""
def __init__(self):
super().__init__()
self.uType:str = u'HELPERS'
self.uIniFileLocation:str = u'none'
self.dLogoPackFolder:Dict[str,cLogosForFolder] = {}
self.dReferences:typeReferences = {}
def Init(self, uObjectName: str, oFnObject: Union[cFileName, None] = None) -> None:
""" Main Init, loads the Enigma Script"""
super().Init(uObjectName= uObjectName, oFnObject=oFnObject)
def RunScript(self, *args, **kwargs) -> Union[Dict,None]:
""" Main Entry point, parses the cmd_type and calls the relevant functions """
uCmdType:str
try:
if 'cmd_type' in kwargs:
uCmdType = kwargs['cmd_type']
if uCmdType == u'get_tvlogofile':
return self.GetTVLogoFile(**kwargs)
elif uCmdType == u'normalize_reference':
return self.NormalizeReference(**kwargs)
                elif uCmdType == u'normalize_channelname':
                    return self.NormalizeChannelName(**kwargs)
return None
except Exception as e:
self.ShowError(uMsg="Can''t run Get TV-Logos Helper script, invalid parameter",uParConfigName=self.uConfigName,oException=e)
return {"ret":1}
def GetTVLogoFile(self, **kwargs) -> Dict:
"""
        Returns the logo file name for the given reference / channel name
        :param kwargs: argument dict, needs to have logopackfoldername, reference and channelname
"""
try:
uLogoPackFolderName:str = ReplaceVars(kwargs['logopackfoldername'])
uReference:str = ReplaceVars(kwargs['reference'])
uChannelName:str = ReplaceVars(kwargs['channelname'])
uFnChannel:str
self.CollectAllChannelLogos(uLogoPackFolderName=uLogoPackFolderName)
uFnChannel=self.FindChannelLogo(uLogoPackFolderName=uLogoPackFolderName,uChannelName=uChannelName,uReference=uReference)
return {"ret":0,"filename":uFnChannel}
except Exception as e:
self.ShowError(uMsg="Can''t find TV Logos", uParConfigName="",oException=e)
return {"ret":1,"filename":""}
def FindChannelLogo(self,*,uChannelName:str, uReference:str, uLogoPackFolderName:str) -> str:
"""
Finds a channel Logo in the list of all logo files
:param str uChannelName: The name of the channel to look for
:param str uReference: The SAT Reference
        :param str uLogoPackFolderName: the name of the folder (only the folder name, not the full path) in "resources", below "tvlogos"
:return: The logo file name (full path), or "text:Channelname" if logo file can't be found
"""
iPos = uChannelName.find("/")
if iPos>0:
uChannelName=uChannelName[:iPos]
uFnLogo = self.FindChannelLogo_sub(uChannelName=uChannelName,uReference=uReference,uLogoPackFolderName=uLogoPackFolderName)
if uFnLogo == u"" or uFnLogo is None:
uFnLogo = u"text:"+uChannelName
Logger.warning("Can't find logo [%s] [%s] in [%s]:" % ( uChannelName,uReference,uLogoPackFolderName))
return uFnLogo
def FindChannelLogo_sub(self,*,uChannelName:str, uReference:str, uLogoPackFolderName:str) -> str:
"""
Helper function to just find a channel logo file, or none if not found
:param str uChannelName: The name of the channel to look for
:param str uReference: The SAT Reference
        :param str uLogoPackFolderName: the name of the folder (only the folder name, not the full path) in "resources", below "tvlogos"
:return: The logo file name (full path), or "" if logo file can't be found
"""
uFnLogo:str
if uReference:
uFnLogo = self.FindLogoByReference(uLogoPackFolderName=uLogoPackFolderName,uReference=uReference)
if uFnLogo:
return uFnLogo
return self.FindLogoByChannelName(uLogoPackFolderName=uLogoPackFolderName,uChannelName=uChannelName)
def CollectAllChannelLogos(self,*,uLogoPackFolderName:str) -> None:
"""
Collect all channel logos in a dict (if not already done)
"""
if not uLogoPackFolderName in self.dLogoPackFolder:
self.dLogoPackFolder[uLogoPackFolderName] = cLogosForFolder()
self.CollectAllChannelLogos_sub(oPath=Globals.oPathTVLogos,uLogoPackFolderName=uLogoPackFolderName, bFirst=True)
def CollectAllChannelLogos_sub(self,*,oPath,uLogoPackFolderName:str,bFirst=True) -> None:
"""
        Helper function to recursively get all files in a folder tree
:param cPath oPath: The start folder in the folder tree
:param bool bFirst: Flag, is this been called first time on recursion
:param str uLogoPackFolderName: the name of the folder (only the folder name, not the full path) in "resources" , below "tvlogos"
"""
oFinalPath:cPath
aFolders:List[str]
uFolder:str
if bFirst:
oFinalPath = oPath+uLogoPackFolderName
self.LoadReferences(oPath=oFinalPath,uLogoPackFolderName=uLogoPackFolderName)
else:
oFinalPath = oPath
self.GetFolderFiles(oPath=oFinalPath,uLogoPackFolderName=uLogoPackFolderName)
        aFolders = oFinalPath.GetFolderList(bFullPath=True)
        for uFolder in aFolders:
self.CollectAllChannelLogos_sub(oPath=cPath(uFolder),uLogoPackFolderName=uLogoPackFolderName,bFirst=False)
def GetFolderFiles(self,*,oPath:cPath,uLogoPackFolderName:str) -> None:
"""
        Helper function to get all files in a folder (files only, not folders)
:param cPath oPath: The folder to collect the files
:param str uLogoPackFolderName: The TV Logo Pack Folder name
"""
aFileList:List[str] = oPath.GetFileList(bFullPath=True,bSubDirs=False)
dLogosForFolder:cLogosForFolder = cLogosForFolder()
uBaseName:str
uFnLogo:str
for uFnLogo in aFileList:
uBaseName = os.path.basename(uFnLogo)
uFileBaseNameStandard = NormalizeName(uName=uBaseName)
oLogoItem = cLogoItem(uLogoKey=uFileBaseNameStandard,uFnLogo=uFnLogo)
dLogosForFolder[oLogoItem.uLogoKey] = oLogoItem
# if the filename is a reference, normalize it and add it
if uBaseName.count("_")>3:
oLogoItem = cLogoItem(uLogoKey=NormalizeReference(uReference=uFileBaseNameStandard),uFnLogo=uFnLogo)
dLogosForFolder[oLogoItem.uLogoKey] = oLogoItem
self.dLogoPackFolder[uLogoPackFolderName].update(dLogosForFolder)
def FindLogoByReference(self,*,uLogoPackFolderName:str, uReference:str) -> str:
"""
Finds a channel logo by a give channel reference
:param uLogoPackFolderName: The name of the folder
:param uReference: The reference string
:return: A filename for the channel reference
"""
uFnLogo:Optional[str]
oLogoItem:Optional[cLogoItem]
dReferences:Dict[str,str] = self.dReferences.get(uLogoPackFolderName)
if dReferences is not None:
uFnLogo = dReferences.get(NormalizeReference(uReference=uReference))
if uFnLogo is not None:
uFnReference:str = NormalizeName(uName=uFnLogo.replace("\n",""))+".png"
oLogosForFolder:Optional[cLogosForFolder] = self.dLogoPackFolder.get(uLogoPackFolderName)
if oLogosForFolder is not None:
oLogoItem = oLogosForFolder.get(uFnReference)
if oLogoItem is None:
oLogoItem = oLogosForFolder.get(NormalizeReference(uReference=uReference))
if oLogoItem is not None:
return oLogoItem.uFnLogo.replace(Globals.oPathResources.string, '$var(RESOURCEPATH)')
return ""
def FindLogoByChannelName(self,*,uLogoPackFolderName:str, uChannelName:str) -> str:
"""
Finds a logo by channel name
:param uLogoPackFolderName: the logo pack name to use
        :param uChannelName: The name of the channel to check for
:return: the logo file name of the channel logo
"""
oLogoItem:Optional[cLogoItem] = None
oLogosForFolder:Optional[cLogosForFolder] = self.dLogoPackFolder.get(uLogoPackFolderName)
if oLogosForFolder is not None:
oLogoItem = oLogosForFolder.get(NormalizeName(uName=uChannelName+".png",bRemoveHD=False))
if oLogoItem is None:
oLogoItem = oLogosForFolder.get(NormalizeName(uName=uChannelName+".png", bRemoveHD=True))
if oLogoItem:
return oLogoItem.uFnLogo.replace(Globals.oPathResources.string, '$var(RESOURCEPATH)')
return ""
def NormalizeChannelName(self, **kwargs) -> Dict:
"""
        Normalizes a channel name for logo matching
        :param kwargs: argument dict, needs to have channelname and removehd
"""
try:
uChannelName:str = ReplaceVars(kwargs['channelname'])
bRemoveHD:bool = ToBool(ReplaceVars(kwargs['removehd']))
return {"ret":1,"channelname":NormalizeName(uName=uChannelName,bRemoveHD=bRemoveHD)}
except Exception as e:
self.ShowError(uMsg="Can''t run NormalizeChannelName, invalid parameter",uParConfigName=self.uConfigName,oException=e)
return {"ret":1}
def NormalizeReference(self, **kwargs) -> Dict:
"""
        Normalizes a channel reference
        :param kwargs: argument dict, needs to have reference
"""
try:
uReference:str = ReplaceVars(kwargs['reference'])
return {"ret":1,"reference":NormalizeReference(uReference=uReference)}
except Exception as e:
self.ShowError(uMsg="Can''t run NormalizeReference, invalid parameter",uParConfigName=self.uConfigName,oException=e)
return {"ret":1}
def LoadReferences(self,*,oPath:cPath,uLogoPackFolderName:str) -> None:
"""
Loads References from a file. A reference file matches a logo file name to a SAT/DVB:C service id, the file name is fixed "srp.index.txt"
        :param oPath: the path to the icon root folder
:param uLogoPackFolderName: the logo pack folder name
:return:
"""
aLines:List[str]
uLine:str
uReference:str
uFileName:str
if uLogoPackFolderName in self.dReferences:
return
self.dReferences[uLogoPackFolderName] = {}
oFnReferences = cFileName(oPath) + "srp.index.txt"
if oFnReferences.Exists():
oFile = open(oFnReferences.string,"r")
aLines = oFile.readlines()
oFile.close()
for uLine in aLines:
uReference,uFileName=uLine.split("=")
uReference= NormalizeReference(uReference=uReference)
self.dReferences[uLogoPackFolderName][uReference] = uFileName.split("-")[0]
def NormalizeReference(*,uReference:str) -> str:
"""
Helper function to increase the match rate of icon names to channel names by removing blanks and special characters
:param str uReference: the core reference
:return: normalized reference
"""
if ":" in uReference:
aParts = uReference.split(":")
uReference = aParts[3]+aParts[4]+aParts[5]+aParts[6]
uReference=uReference.replace("_","")
# uReference = uReference.replace("0", "")
return uReference
def NormalizeName(*,uName:str, bRemoveHD:bool = False) -> str:
"""
Helper function to increase the match rate of icon names to channel names by removing blanks and special characters
:param str uName: the name to normalize
:param bool bRemoveHD: Remove the 'HD', 'UHD' tags as well
:return: normalized file name
"""
uName = uName.lower().replace(" ", "").replace("/","").replace("\\","").replace("+","plus").replace("-","").replace("_","")
if bRemoveHD:
uName = uName.replace("uhd.",".").replace("sd.",".").replace("hd.",".")
return remove_umlaut(string=uName)
def remove_umlaut(*,string:str) -> str:
"""
Removes umlauts from strings and replaces them with the letter+e convention
:param string: string to remove umlauts from
:return: unumlauted string
"""
string = string.replace(u"ü", u'ue')
string = string.replace(u"Ü", u'Ue')
string = string.replace(u"ä", u'ae')
string = string.replace(u"Ä", u'Ae')
string = string.replace(u"ö", u'oe')
string = string.replace(u"Ö", u'Oe')
string = string.replace(u"ß", u'ss')
return string
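# Illustrative results (a sketch; the service reference below is made up):
# NormalizeName(uName=u"Das Erste HD.png", bRemoveHD=True) -> u"daserste.png"
# NormalizeReference(uReference=u"1:0:19:283D:3FB:1:C00000:0:0:0:") -> u"283D3FB1C00000"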
'''
<e2movie>
<e2servicereference>
1:0:0:0:0:0:0:0:0:0:/media/hdd/movie/20191101 1711 - Sky Cinema Spooky Halloween HD - Die Legende der Wächter.ts
</e2servicereference>
<e2title>Die Legende der Wächter</e2title>
<e2description>Fantasyfilm</e2description>
<e2descriptionextended>
Atemberaubend animiertes Fantasyabenteuer für die ganze Familie nach den Kinderbüchern von Kathryn Lasky. - Als die jungen Schleiereulen Sören und Kludd von ihrem Heimatbaum stürzen, werden die Brüder von mysteriösen Eulen gerettet. Doch die Retter entpuppen sich als die diabolischen "Reinsten", die mit Hilfe eines Sklavenheeres ein despotisches Regime errichten wollen. Sören gelingt mit der kleinen Gylfie die Flucht. Gemeinsam suchen sie die legendären Wächter von Ga'Hoole, die schon einmal die "Reinsten" vertrieben haben sollen. 89 Min. AUS/USA 2010. Von Zack Snyder. Ab 6 Jahren
</e2descriptionextended>
<e2servicename>Sky Cinema Spooky Halloween HD</e2servicename>
<e2time>1572624682</e2time>
<e2length>21:09</e2length>
<e2tags/>
<e2filename>
/media/hdd/movie/20191101 1711 - Sky Cinema Spooky Halloween HD - Die Legende der Wächter.ts
</e2filename>
<e2filesize>1397197200</e2filesize>
</e2movie>
'''
| thica/ORCA-Remote | src/scripts/helper/helper_gettvlogo/script.py | Python | gpl-3.0 | 18,571 |
# Search for lines that start with 'Details:' and contain 'rev='
# followed by numbers and '.'
# Then print the list of matches if any were found
import re
hand = open('mbox-short.txt')
for line in hand:
line = line.rstrip()
x = re.findall('^Details:.*rev=([0-9.]+)', line)
if len(x) > 0:
print(x)
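# A matching line in mbox-short.txt looks like this (illustrative):
# Details: http://source.sakaiproject.org/viewsvn/?view=rev&rev=39772
# for which findall() returns ['39772'], which the script prints.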
| mkhuthir/learnPython | Book_pythonlearn_com/18_regex/re12.py | Python | mit | 306 |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lifting parameters in Haiku."""
import functools
import types
from typing import Any, Callable, Mapping, MutableMapping, Tuple, TypeVar
from haiku._src import base
from haiku._src import data_structures
from haiku._src import module
from haiku._src import transform
T = TypeVar("T")
MutableBundle = MutableMapping[str, MutableMapping[str, Any]]
# If you are forking replace this with `import haiku as hk`.
hk = types.ModuleType("haiku")
hk.Params = base.Params
hk.State = base.State
hk.Module = module.Module
hk.running_init = transform.running_init
hk.data_structures = data_structures
del module, data_structures, transform
def pack_into_dict(src: hk.Params,
dst: MutableMapping[str, Any],
prefix: str,
state: bool = False):
"""Puts items from src into dst, with an added prefix."""
for key, value in src.items():
if prefix:
new_key = f"{prefix}/{key}"
else:
new_key = key
assert new_key not in dst
value = dict(value)
if state:
value = {k: base.StatePair(v, v) for k, v in value.items()}
dst[new_key] = value
def unpack_from_dict(src: hk.Params, prefix: str) -> MutableBundle:
"""Returns pairs from src where key begins with prefix, cutting off prefix."""
if not prefix:
return {k: dict(v) for k, v in src.items()}
l = len(prefix)
return {k[l:]: dict(v) for k, v in src.items() if k.startswith(prefix)}
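# Round-trip illustration for the two helpers above (a sketch, not executed):
#
#   src = {"linear": {"w": 1.0}}
#   dst = {}
#   pack_into_dict(src, dst, "lifted")  # dst == {"lifted/linear": {"w": 1.0}}
#   unpack_from_dict(dst, "lifted/")    # == {"linear": {"w": 1.0}}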
def add_state_to_init_fn(
init_fn: Callable[..., hk.Params],
) -> Callable[..., Tuple[hk.Params, hk.State]]:
def wrapped_init_fn(*a, **k):
params = init_fn(*a, **k)
if not isinstance(params, Mapping):
raise ValueError("For stateful lifted functions use `hk.lift_with_state`")
return params, {}
return wrapped_init_fn
# TODO(tycai): Make sure transformed functions have better names.
class LiftingModule(hk.Module):
"""See :func:`lift` or :func:`lift_with_state`."""
def __init__(self, init_fn, transparent=False, name="lifted"):
super().__init__(name=name)
self._init_fn = init_fn
if transparent:
self._prefix_name = "/".join(self.module_name.split("/")[:-1])
else:
self._prefix_name = self.module_name
def __call__(self, *args, **kwargs):
frame = base.current_frame()
outer_params = frame.params
outer_state = frame.state
if hk.running_init():
inner_params, inner_state = self._init_fn(*args, **kwargs)
# Lift parameters into this transform's params_dict.
pack_into_dict(inner_params, outer_params, self._prefix_name)
pack_into_dict(inner_state, outer_state, self._prefix_name, state=True)
return inner_params, inner_state
else:
if self._prefix_name:
prefix = f"{self._prefix_name}/"
else:
prefix = ""
inner_params = unpack_from_dict(outer_params, prefix)
inner_state = unpack_from_dict(outer_state, prefix)
inner_state = base.extract_state(inner_state, initial=False)
inner_params = hk.data_structures.to_haiku_dict(inner_params)
inner_state = hk.data_structures.to_haiku_dict(inner_state)
return inner_params, inner_state
def lift(
init_fn: Callable[..., hk.Params],
name: str = "lifted",
) -> Callable[..., hk.Params]:
r"""Lifts the given init fn to a function in the current Haiku namespace.
During init, the returned callable will run the given ``init_fn``, and include
the resulting params in the outer transform's dictionaries.
During ``apply``, the returned callable will instead pull the relevant
parameters from the outer transform's dictionaries.
Must be called inside :func:`transform`\ , and be passed the ``init``
member of a :class:`Transformed`\ .
The user must ensure that the given ``init`` does not accidentally catch
modules from an outer :func:`transform` via functional closure.
Example:
>>> def g(x):
... return hk.Linear(1)(x)
>>> g = hk.transform(g)
>>> init_rng = hk.next_rng_key() if hk.running_init() else None
>>> x = jnp.ones([1, 1])
>>> params = hk.lift(g.init, name='f_lift')(init_rng, x)
>>> out = g.apply(params, None, x)
Args:
init_fn: The ``init`` function from an :class:`Transformed`\ .
name: A string name to prefix parameters with.
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` reuses parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
"""
base.assert_context("lift")
init_fn = add_state_to_init_fn(init_fn)
params_and_state_fn, updater = lift_with_state(init_fn, name)
updater.ignore_update()
return lambda *a, **k: params_and_state_fn(*a, **k)[0]
def transparent_lift(
init_fn: Callable[..., hk.Params]
) -> Callable[..., hk.Params]:
"""Similar to `lift` except no additional scope is added to the parameters."""
base.assert_context("lift")
init_fn = add_state_to_init_fn(init_fn)
lifted = LiftingModule(init_fn, transparent=True)
# NOTE: Using lambda to avoid exposing module object.
return lambda *a, **k: lifted(*a, **k)[0] # pylint: disable=unnecessary-lambda
def with_assert_not_used(f):
"""Validates that an updater method is called correctly."""
# NOTE defined outside LiftWithStateUpdater to avoid adding this to the public
# API and letting users call directly.
@functools.wraps(f)
def wrapper(self, *a, **k):
if self._used: # pylint: disable=protected-access
raise ValueError("State updater must only be used once.")
if not base.inside_transform():
raise ValueError(
"State updater must be used inside hk.transform_with_state.")
if self._context_id != id(base.current_context()): # pylint: disable=protected-access
raise ValueError(
"State updater must be used within the same call to init/apply.")
self._used = True # pylint: disable=protected-access
return f(self, *a, **k)
return wrapper
class LiftWithStateUpdater:
"""Handles updating the state for a `lift_with_state` computation."""
__slots__ = ("_used", "_name", "_context_id")
def __init__(self, name: str):
self._used = False
self._name = name
ctx = base.current_context()
# Note: using ID is safe here because we know the lifetime of the context
# instance will outlive the updater thanks to the callback.
self._context_id = id(ctx)
ctx.add_teardown_callback(self.assert_used)
def assert_used(self):
if not self._used:
raise ValueError("LiftWithStateUpdater (from `lift_with_state`) must be "
"used, call `.update(..)` or `.ignore_update()` before "
"it goes out of scope.")
@with_assert_not_used
def ignore_update(self):
"""Notifies the updater that state does not need to be updated."""
pass
@with_assert_not_used
def update(self, state: hk.State):
"""Updates Haiku's internal state to the given state."""
frame = base.current_frame()
for mod_name, bundle in state.items():
mod_name = f"{self._name}/{mod_name}"
for name, value in bundle.items():
initial_pair = base.StatePair(value, value)
initial = frame.state[mod_name].get(name, initial_pair).initial
frame.state[mod_name][name] = base.StatePair(initial, value)
def _to_callable(f: Callable[..., T]) -> Callable[..., T]:
"""Enapsulates the given callable inside a lambda."""
# Prevents us from leaking methods other than __call__ on `f`.
return lambda *a, **k: f(*a, **k) # pylint: disable=unnecessary-lambda
def lift_with_state(
init_fn: Callable[..., Tuple[hk.Params, hk.State]],
name: str = "lifted",
) -> Tuple[Callable[..., Tuple[hk.Params, hk.State]], LiftWithStateUpdater]:
r"""Lifts the given init fn to a function in the current Haiku namespace.
This function returns two objects. The first is a callable that runs your init
function with slightly behaviour based on init vs. apply time. The second is
an updater that can be used to pass updated state values that result from
running your apply function. See later in the docs for a worked example.
During init, the returned callable will run the given ``init_fn``, and include
the resulting params/state in the outer transform's dictionaries. During
``apply``, the returned callable will instead pull the relevant params/state
from the outer transform's dictionaries.
Must be called inside :func:`transform_with_state`\ , and be passed the
``init`` member of a :class:`TransformedWithState`\ .
The user must ensure that the given ``init`` does not accidentally catch
modules from an outer :func:`transform_with_state` via functional closure.
Example:
>>> def g(x):
... return hk.nets.ResNet50(1)(x, True)
>>> g = hk.transform_with_state(g)
>>> params_and_state_fn, updater = (
... hk.experimental.lift_with_state(g.init, name='f_lift'))
>>> init_rng = hk.next_rng_key() if hk.running_init() else None
>>> x = jnp.ones([1, 224, 224, 3])
>>> params, state = params_and_state_fn(init_rng, x)
>>> out, state = g.apply(params, state, None, x)
>>> updater.update(state)
Args:
init_fn: The ``init`` function from an :class:`TransformedWithState`\ .
name: A string name to prefix parameters with.
Returns:
A callable that during ``init`` injects parameter values into the outer
context and during ``apply`` reuses parameters from the outer context. In
both cases returns parameter values to be used with an ``apply`` function.
The ``init`` function additionally returns an object used to update the
outer context with new state after ``apply`` is called.
"""
base.assert_context("experimental.lift_with_state")
params_and_state_fn = _to_callable(LiftingModule(init_fn, name=name))
updater = LiftWithStateUpdater(name)
return params_and_state_fn, updater
| deepmind/dm-haiku | haiku/_src/lift.py | Python | apache-2.0 | 10,617 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_apm_policy_fetch
short_description: Exports the APM policy or APM access profile from remote nodes.
description:
  - Exports the APM policy or APM access profile from remote nodes.
version_added: 2.8
options:
name:
description:
- The name of the APM policy or APM access profile exported to create a file on the remote device for downloading.
required: True
dest:
description:
- A directory to save the file into.
type: path
file:
description:
- The name of the file to be created on the remote device for downloading.
type:
description:
      - Specifies the type of item to export from the device.
choices:
- profile_access
- access_policy
default: profile_access
force:
description:
      - If C(no), the file will only be transferred if it does not exist in the destination.
default: yes
type: bool
partition:
description:
      - Device partition which contains the APM policy or APM access profile to export.
default: Common
notes:
  - Due to ID685681 it is not possible to execute ng_* tools via the REST API on v12.x and 13.x;
    once this is fixed, this restriction will be removed.
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Export APM access profile
bigip_apm_policy_fetch:
name: foobar
file: export_foo
dest: /root/download
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Export APM access policy
bigip_apm_policy_fetch:
name: foobar
file: export_foo
dest: /root/download
type: access_policy
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Export APM access profile, autogenerate name
bigip_apm_policy_fetch:
name: foobar
dest: /root/download
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the APM policy or APM access profile to be exported.
returned: changed
type: str
sample: APM_policy_global
file:
description:
- Name of the exported file on the remote BIG-IP to download. If not
specified, then this will be a randomly generated filename.
returned: changed
type: str
sample: foobar_file
dest:
description: Local path to download exported APM policy.
returned: changed
type: str
sample: /root/downloads/profile-foobar_file.conf.tar.gz
type:
description: Set to specify type of item to export.
returned: changed
type: str
sample: access_policy
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import download_file
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import download_file
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {}
api_attributes = []
returnables = [
'name',
'file',
'dest',
'type',
'force',
]
updatables = []
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
def _item_exists(self):
if self.type == 'access_policy':
uri = 'https://{0}:{1}/mgmt/tm/apm/policy/access-policy/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.partition, self.name)
)
else:
uri = 'https://{0}:{1}/mgmt/tm/apm/profile/access/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.partition, self.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'items' in response and response['items'] != []:
return True
return False
@property
def file(self):
if self._values['file'] is not None:
return self._values['file']
result = next(tempfile._get_candidate_names()) + '.tar.gz'
self._values['file'] = result
return result
@property
def fulldest(self):
result = None
if os.path.isdir(self.dest):
result = os.path.join(self.dest, self.file)
else:
if os.path.exists(os.path.dirname(self.dest)):
result = self.dest
            else:
                try:
                    # os.path.exists() can return false in some
                    # circumstances where the directory does not have
                    # the execute bit for the current user set, in
                    # which case the stat() call will raise an OSError
                    os.stat(os.path.dirname(self.dest))
                    result = self.dest
                except OSError as e:
                    if "permission denied" in str(e).lower():
                        raise F5ModuleError(
                            "Destination directory {0} is not accessible".format(os.path.dirname(self.dest))
                        )
                    raise F5ModuleError(
                        "Destination directory {0} does not exist".format(os.path.dirname(self.dest))
                    )
if not os.access(os.path.dirname(result), os.W_OK):
raise F5ModuleError(
"Destination {0} not writable".format(os.path.dirname(result))
)
return result
@property
def name(self):
if not self._item_exists():
raise F5ModuleError('The provided {0} with the name {1} does not exist on device.'.format(
self.type, self._values['name'])
)
return self._values['name']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
if not module_provisioned(self.client, 'apm'):
raise F5ModuleError(
"APM must be provisioned to use this module."
)
if self.version_less_than_14():
            raise F5ModuleError('Due to bug ID685681 it is not possible to use this module on TMOS versions below 14.x.')
result = dict()
self.export()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=True))
return result
def version_less_than_14(self):
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('14.0.0'):
return True
return False
def export(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
if not self.want.force:
raise F5ModuleError(
"File '{0}' already exists.".format(self.want.fulldest)
)
self.execute()
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
self.execute()
return True
def download(self):
self.download_from_device(self.want.fulldest)
if os.path.exists(self.want.fulldest):
return True
raise F5ModuleError(
"Failed to download the remote file."
)
def execute(self):
self.download()
self.remove_temp_file_from_device()
return True
def exists(self):
if os.path.exists(self.want.fulldest):
return True
return False
def create_on_device(self):
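        # Note: ng_export appears to take the item name twice, once as the
        # item to export and once as the base name of the generated archive
        # (cf. _move_file_to_download, which expects '<item>-<name>.conf.tar.gz').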
cmd = 'ng_export -t {0} {1} {2} -p {3}'.format(
self.want.type, self.want.name, self.want.name, self.want.partition
)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'commandResult' in response:
raise F5ModuleError('Item export command failed.')
return True
def _move_file_to_download(self):
if self.want.type == 'access_policy':
item = 'policy'
else:
item = 'profile'
name = '{0}-{1}.conf.tar.gz'.format(item, self.want.name)
move_path = '/shared/tmp/{0} {1}/{2}'.format(
name,
'/ts/var/rest',
self.want.file
)
params = dict(
command='run',
utilCmdArgs=move_path
)
uri = "https://{0}:{1}/mgmt/tm/util/unix-mv/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
if 'commandResult' in response:
if 'cannot stat' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def download_from_device(self, dest):
url = 'https://{0}:{1}/mgmt/tm/asm/file-transfer/downloads/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.file
)
try:
download_file(self.client, url, dest)
except F5ModuleError:
raise F5ModuleError(
"Failed to download the file."
)
if os.path.exists(self.want.dest):
return True
return False
def remove_temp_file_from_device(self):
tpath_name = '/ts/var/rest/{0}'.format(self.want.file)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
dest=dict(
type='path'
),
type=dict(
default='profile_access',
choices=['profile_access', 'access_policy']
),
file=dict(),
force=dict(
default='yes',
type='bool'
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| Jorge-Rodriguez/ansible | lib/ansible/modules/network/f5/bigip_apm_policy_fetch.py | Python | gpl-3.0 | 15,623 |
import os
import sys
class CommandLine():
    def __init__(self):
        self.data = ""
def QUIT(self):
sys.exit()
def MKDIR(self,newpath):
""" Creates a directory using filename or path """
if not os.path.exists(newpath):
os.makedirs(newpath)
return "Created {0}.".format(newpath)
else:
return "ERROR: {0} already exists.".format(newpath)
def MKFILE(self,filename):
""" Creates a file using a filename or path """
        if not os.path.exists(filename):
            f = open(filename, "w+")
f.close()
return "Created {0}.".format(filename)
else:
return "ERROR: {0} already exists.".format(filename)
    def LIST(self):
        """ Returns every file and directory in the current directory """
        path = self.current_directory()
        # os.walk() is recursive; only the first (top-level) result is wanted.
        root, dirs, files = next(os.walk(path))
        entries = sorted(files + dirs, key=str.lower)
        result = ""
        for entry in entries:
            result += "{0} ".format(entry)
        return result
def PRINT(self,message):
""" Print out to the screen a message """
message = message.split(" ")
del message[0]
return " ".join(message)
    def MOVE(self,filename,directory):
return "Moved: {0} to {1}".format(filename, directory)
def COPY(self,filename,new_filename):
return "Copied: {0} to {1}".format(filename, new_filename)
def ENTER(self,directory):
return "Directory: {0}".format(directory)
def MSG(self,data, index):
return data.split(" ")[index]
def current_directory(self):
return os.path.dirname(os.path.abspath(__file__))
    def check(self, data):
        """ Parses the user input and calls the matching command function """
        self.data = data
        command = data.split(" ")[0].lower()
        if command == "quit" or command == "exit":
            return self.QUIT()
        elif command == "print" or command == "echo":
            return self.PRINT(data)
        elif command == "enter" or command == "cd":
            return self.ENTER(self.MSG(data, 1))
        elif command == "list" or command == "ls":
            return self.LIST()
        elif command == "mkdir":
            return self.MKDIR(self.MSG(data, 1))
        elif command == "mkfile" or command == "touch":
            return self.MKFILE(self.MSG(data, 1))
        elif command == "move":
            return self.MOVE(self.MSG(data, 1), self.MSG(data, 2))
        elif command == "copy":
            return self.COPY(self.MSG(data, 1), self.MSG(data, 2))
        else:
            return "ERROR: No such command exists"
def run():
    cli = CommandLine()
    print("Welcome to Python Command Line:")
    while True:
        user_input = raw_input("> ")
        print(cli.check(user_input))
| Hellrungj/CSC-412-Networking | Retry_Resend/CommandLine.py | Python | gpl-3.0 | 2,798 |
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
# Try to reproduce a Numeric interaction bug if Numeric is installed.
>>> from bienstman1_ext import *
>>> try: from Numeric import *
... except: pass
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| alexa-infra/negine | thirdparty/boost-python/libs/python/test/bienstman1.py | Python | mit | 675 |
"""!@package grass.script.vector
@brief GRASS Python scripting module (vector functions)
Vector related functions to be used in Python scripts.
Usage:
@code
from grass.script import vector as grass
grass.vector_db(map)
...
@endcode
(C) 2008-2010 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
@author Glynn Clements
@author Martin Landa <landa.martin gmail.com>
"""
import os
import types
import __builtin__
from core import *
# run "v.db.connect -g ..." and parse output
def vector_db(map, **args):
"""!Return the database connection details for a vector map
(interface to `v.db.connect -g'). Example:
\code
>>> grass.vector_db('lakes')
{1: {'layer': 1, 'name': '',
'database': '/home/martin/grassdata/nc_spm_08/PERMANENT/dbf/',
'driver': 'dbf', 'key': 'cat', 'table': 'lakes'}}
\endcode
@param map vector map
@param args other v.db.connect's arguments
@return dictionary
"""
s = read_command('v.db.connect', flags = 'g', map = map, fs = ';', **args)
result = {}
for l in s.splitlines():
f = l.split(';')
if len(f) != 5:
continue
if '/' in f[0]:
f1 = f[0].split('/')
layer = f1[0]
name = f1[1]
else:
layer = f[0]
name = ''
result[int(layer)] = {
'layer' : int(layer),
'name' : name,
'table' : f[1],
'key' : f[2],
'database' : f[3],
'driver' : f[4] }
return result
def vector_layer_db(map, layer):
"""!Return the database connection details for a vector map layer.
    If the db connection for the given layer is not defined, fatal() is called.
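
    Example (illustrative; assumes a map 'lakes' with a table connected
    on layer 1):

    \code
    >>> grass.vector_layer_db('lakes', 1)['table']
    'lakes'
    \endcode
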
@param map map name
@param layer layer number
@return parsed output
"""
try:
f = vector_db(map)[int(layer)]
except KeyError:
fatal(_("Database connection not defined for layer %s") % layer)
return f
# run "v.info -c ..." and parse output
def vector_columns(map, layer = None, getDict = True, **args):
"""!Return a dictionary (or a list) of the columns for the
database table connected to a vector map (interface to `v.info
-c').
@code
>>> vector_columns(urbanarea, getDict = True)
{'UA_TYPE': {'index': 4, 'type': 'CHARACTER'}, 'UA': {'index': 2, 'type': 'CHARACTER'}, 'NAME': {'index': 3, 'type': 'CHARACTER'}, 'OBJECTID': {'index': 1, 'type': 'INTEGER'}, 'cat': {'index': 0, 'type': 'INTEGER'}}
>>> vector_columns(urbanarea, getDict = False)
['cat', 'OBJECTID', 'UA', 'NAME', 'UA_TYPE']
@endcode
@param map map name
@param layer layer number or name (None for all layers)
@param getDict True to return dictionary of columns otherwise list of column names is returned
@param args (v.info's arguments)
@return dictionary/list of columns
"""
s = read_command('v.info', flags = 'c', map = map, layer = layer, quiet = True, **args)
if getDict:
result = dict()
else:
result = list()
i = 0
for line in s.splitlines():
ctype, cname = line.split('|')
if getDict:
result[cname] = { 'type' : ctype,
'index' : i }
else:
result.append(cname)
        i += 1
return result
# add vector history
def vector_history(map):
"""!Set the command history for a vector map to the command used to
invoke the script (interface to `v.support').
@param map mapname
@return v.support output
"""
run_command('v.support', map = map, cmdhist = os.environ['CMDLINE'])
# run "v.info -t" and parse output
def vector_info_topo(map):
"""!Return information about a vector map (interface to `v.info
-t'). Example:
\code
>>> grass.vector_info_topo('lakes')
{'kernels': 0, 'lines': 0, 'centroids': 15279,
'boundaries': 27764, 'points': 0, 'faces': 0,
'primitives': 43043, 'islands': 7470, 'nodes': 35234, 'map3d': False, 'areas': 15279}
\endcode
@param map map name
@return parsed output
"""
s = read_command('v.info', flags = 't', map = map)
ret = parse_key_val(s, val_type = int)
if 'map3d' in ret:
ret['map3d'] = bool(ret['map3d'])
return ret
# run "v.info -get ..." and parse output
def vector_info(map):
"""!Return information about a vector map (interface to
`v.info'). Example:
\code
>>> grass.vector_info('random_points')
{'comment': '', 'projection': 'x,y', 'creator': 'soeren', 'holes': 0,
'primitives': 20, 'kernels': 0, 'scale': '1:1', 'title': '',
'west': 0.046125489999999998, 'top': 2376.133159, 'boundaries': 0,
'location': 'XYLocation', 'nodes': 0, 'east': 0.97305646000000001,
'source_date': 'Mon Aug 29 10:55:57 2011', 'north': 0.9589993,
'format': 'native', 'faces': 0, 'centroids': 0,
'digitization_threshold': '0.000000', 'islands': 0, 'level': 2,
'mapset': 'test', 'areas': 0, 'name': 'random_points',
'database': '/home/soeren/grassdata', 'bottom': 22.186596999999999,
'lines': 0, 'points': 20, 'map3d': True, 'volumes': 0, 'num_dblinks': 0,
'organization': '', 'south': 0.066047099999999997}
\endcode
@param map map name
@return parsed vector info
"""
s = read_command('v.info', flags = 'get', map = map)
kv = parse_key_val(s)
for k in ['north', 'south', 'east', 'west', 'top', 'bottom']:
kv[k] = float(kv[k])
for k in ['level', 'num_dblinks']:
kv[k] = int(kv[k])
for k in ['nodes', 'points', 'lines', 'boundaries', 'centroids', 'areas', 'islands', \
'faces', 'kernels', 'volumes', 'holes', 'primitives']:
kv[k] = int(kv[k])
if 'map3d' in kv:
kv['map3d'] = bool(kv['map3d'])
return kv
# interface for v.db.select
def vector_db_select(map, layer = 1, **kwargs):
"""!Get attribute data of selected vector map layer.
Function returns list of columns and dictionary of values ordered by
key column value. Example:
\code
    >>> print grass.vector_db_select('lakes')['values'][3]
['3', '19512.86146', '708.44683', '4', '55652', 'LAKE/POND', '39000', '']
\endcode
@param map map name
@param layer layer number
@param kwargs v.db.select options
@return dictionary ('columns' and 'values')
"""
try:
key = vector_db(map = map)[layer]['key']
except KeyError:
error(_('Missing layer %(layer)d in vector map <%(map)s>') % \
{ 'layer' : layer, 'map' : map })
return { 'columns' : [], 'values' : {} }
if 'columns' in kwargs:
if key not in kwargs['columns'].split(','):
# add key column if missing
debug("Adding key column to the output")
kwargs['columns'] += ',' + key
ret = read_command('v.db.select',
map = map,
layer = layer,
fs = '|', **kwargs)
if not ret:
        error(_('vector_db_select() failed'))
return { 'columns' : [], 'values' : {} }
columns = []
values = {}
for line in ret.splitlines():
if not columns:
columns = line.split('|')
key_index = columns.index(key)
continue
value = line.split('|')
key_value = int(value[key_index])
values[key_value] = line.split('|')
return { 'columns' : columns,
'values' : values }
# interface to v.what
def vector_what(map, coord, distance = 0.0):
"""!Query vector map at given locations
To query one vector map at one location
@code
print grass.vector_what(map = 'archsites', coord = (595743, 4925281), distance = 250)
[{'Category': 8, 'Map': 'archsites', 'Layer': 1, 'Key_column': 'cat',
'Database': '/home/martin/grassdata/spearfish60/PERMANENT/dbf/',
'Mapset': 'PERMANENT', 'Driver': 'dbf',
'Attributes': {'str1': 'No_Name', 'cat': '8'},
'Table': 'archsites', 'Type': 'Point', 'Id': 8}]
@endcode
    To query more vector maps at one location
@code
for q in grass.vector_what(map = ('archsites', 'roads'), coord = (595743, 4925281),
distance = 250):
print q['Map'], q['Attributes']
archsites {'str1': 'No_Name', 'cat': '8'}
roads {'label': 'interstate', 'cat': '1'}
@endcode
    To query one vector map at more locations
@code
for q in grass.vector_what(map = 'archsites', coord = [(595743, 4925281), (597950, 4918898)],
distance = 250):
print q['Map'], q['Attributes']
archsites {'str1': 'No_Name', 'cat': '8'}
archsites {'str1': 'Bob_Miller', 'cat': '22'}
@endcode
@param map vector map(s) to query given as string or list/tuple
@param coord coordinates of query given as tuple (easting, northing) or list of tuples
@param distance query threshold distance (in map units)
@return parsed list
"""
if "LC_ALL" in os.environ:
locale = os.environ["LC_ALL"]
os.environ["LC_ALL"] = "C"
if type(map) in (types.StringType, types.UnicodeType):
map_list = [map]
else:
map_list = map
layer_list = ['-1'] * len(map_list)
coord_list = list()
if type(coord) is types.TupleType:
coord_list.append('%f,%f' % (coord[0], coord[1]))
else:
for e, n in coord:
coord_list.append('%f,%f' % (e, n))
ret = read_command('v.what',
quiet = True,
flags = 'ag',
map = ','.join(map_list),
layer = ','.join(layer_list),
east_north = ','.join(coord_list),
distance = float(distance))
if "LC_ALL" in os.environ:
os.environ["LC_ALL"] = locale
data = list()
if not ret:
return data
dict_attrb = None
for item in ret.splitlines():
try:
key, value = __builtin__.map(lambda x: x.strip(), item.split('=', 1))
except ValueError:
continue
if key in ('East', 'North'):
continue
if key == 'Map':
dict_main = { 'Map' : value }
dict_attrb = None
data.append(dict_main)
continue
else:
if dict_attrb is not None:
dict_attrb[key] = value
else:
if key in ('Category', 'Layer', 'Id'):
dict_main[key] = int(value)
else:
dict_main[key] = value
if key == 'Key_column':
# skip attributes
dict_attrb = dict()
dict_main['Attributes'] = dict_attrb
return data
| AsherBond/MondocosmOS | grass_trunk/lib/python/vector.py | Python | agpl-3.0 | 11,025 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import re
import time
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_5_1(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '5 5 1'
golden_devices_required = 1
def on_dialog(self, dialog, title):
if title.startswith('User Input Required'):
body = dialog.find_element_by_id('cnfrmMsg').text
match = re.search(r'(?<=Leader Timeout\[)\d+(?= Seconds\])', body)
if match:
timeout = int(match.group(0)) / 2
else:
timeout = 10
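            # Keep the DUT down for half the advertised leader timeout
            # (defaulting to 10 seconds) before bringing it back up.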
self.dut.stop()
time.sleep(timeout)
self.dut.start()
return False
if __name__ == '__main__':
unittest.main()
| turon/openthread | tools/harness-automation/cases/leader_5_5_1.py | Python | bsd-3-clause | 2,313 |
from __future__ import absolute_import
from celery import shared_task
import datetime
from api.rds_api import RdsApi
import log.log as log
from account.models import Account
from .models import Rds
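# The sync_rds_* / deal_* pairs below are identical except for the region
# string and the per-region token/project-id fields read from each Account.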
def sync_rds_north():
try:
account_data = Account.objects.all()
for account in account_data:
rds_all_n = RdsApi(account.tokencn_north_1,'cn-north-1',account.pidcn_north_1).get_instances()
ilist = []
#add rds
for ins in rds_all_n:
Rds.objects.get_or_create(
rds_name=ins['name'],
region='cn-north-1',
rds_id=ins['id'],
rds_host=ins['hostname'],
rds_type=ins['type'],
account_name=account.account_name,
account_id=account.id
)
ilist.append(ins['id'])
#update status,port
Rds.objects.filter(rds_id=ins['id']).update(rds_status=ins['status'])
Rds.objects.filter(rds_id=ins['id']).update(rds_port=ins['dbPort'])
#delete mysql_rds
rds_id_all_n = set(ilist)
rds_id_all_mysql_n = set(list(map(lambda x: x.rds_id,Rds.objects.filter(account_name=account.account_name,region='cn-north-1'))))
delete_rds_mysql_n = list(rds_id_all_mysql_n - rds_id_all_n)
for i in delete_rds_mysql_n:
Rds.objects.filter(rds_id=i).delete()
except Exception as e:
log.logging.error(e)
def deal_north():
try:
account_data = Account.objects.all()
utc_time_now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
for account in account_data:
#delete rds
rds_id_delete_n = list(map(lambda x: x.rds_id, Rds.objects.filter(rds_delete_time__lt=utc_time_now,account_name=account.account_name,region='cn-north-1')))
for ins in rds_id_delete_n:
RdsApi(account.tokencn_north_1,'cn-north-1',account.pidcn_north_1).delete_instances(ins)
except Exception as e:
log.logging.error(e)
def sync_rds_east():
try:
account_data = Account.objects.all()
for account in account_data:
rds_all_e = RdsApi(account.tokencn_east_2,'cn-east-2',account.pidcn_east_2).get_instances()
ilist = []
#add rds
for ins in rds_all_e:
Rds.objects.get_or_create(
rds_name=ins['name'],
region='cn-east-2',
rds_id=ins['id'],
rds_host=ins['hostname'],
rds_type=ins['type'],
account_name=account.account_name,
account_id=account.id
)
ilist.append(ins['id'])
#update status,port
Rds.objects.filter(rds_id=ins['id']).update(rds_status=ins['status'])
Rds.objects.filter(rds_id=ins['id']).update(rds_port=ins['dbPort'])
#delete mysql_rds
rds_id_all_e = set(ilist)
rds_id_all_mysql_e = set(list(map(lambda x: x.rds_id,Rds.objects.filter(account_name=account.account_name,region='cn-east-2'))))
delete_rds_mysql_e = list(rds_id_all_mysql_e - rds_id_all_e)
for i in delete_rds_mysql_e:
Rds.objects.filter(rds_id=i).delete()
except Exception as e:
log.logging.error(e)
def deal_east():
try:
account_data = Account.objects.all()
utc_time_now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
for account in account_data:
#delete rds
rds_id_delete_e = list(map(lambda x: x.rds_id, Rds.objects.filter(rds_delete_time__lt=utc_time_now,account_name=account.account_name,region='cn-east-2')))
for ins in rds_id_delete_e:
RdsApi(account.tokencn_east_2,'cn-east-2',account.pidcn_east_2).delete_instances(ins)
except Exception as e:
log.logging.error(e)
def sync_rds_south():
try:
account_data = Account.objects.all()
for account in account_data:
rds_all_s = RdsApi(account.tokencn_south_1,'cn-south-1',account.pidcn_south_1).get_instances()
ilist = []
#add rds
for ins in rds_all_s:
Rds.objects.get_or_create(
rds_name=ins['name'],
region='cn-south-1',
rds_id=ins['id'],
rds_host=ins['hostname'],
rds_type=ins['type'],
account_name=account.account_name,
account_id=account.id
)
ilist.append(ins['id'])
#update status,port
Rds.objects.filter(rds_id=ins['id']).update(rds_status=ins['status'])
Rds.objects.filter(rds_id=ins['id']).update(rds_port=ins['dbPort'])
#delete mysql_rds
rds_id_all_s = set(ilist)
rds_id_all_mysql_s = set(list(map(lambda x: x.rds_id,Rds.objects.filter(account_name=account.account_name,region='cn-south-1'))))
delete_rds_mysql_s = list(rds_id_all_mysql_s - rds_id_all_s)
for i in delete_rds_mysql_s:
Rds.objects.filter(rds_id=i).delete()
except Exception as e:
log.logging.error(e)
def deal_south():
try:
account_data = Account.objects.all()
utc_time_now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
for account in account_data:
#delete rds
rds_id_delete_s = list(map(lambda x: x.rds_id, Rds.objects.filter(rds_delete_time__lt=utc_time_now,account_name=account.account_name,region='cn-south-1')))
for ins in rds_id_delete_s:
RdsApi(account.tokencn_south_1,'cn-south-1',account.pidcn_south_1).delete_instances(ins)
except Exception as e:
log.logging.error(e)
@shared_task
def rds_task():
sync_rds_north()
deal_north()
sync_rds_east()
deal_east()
sync_rds_south()
deal_south()
| hyperwd/hwcram | rds/tasks.py | Python | mit | 6,135 |
import sys
import MySQLdb
import rospy
import config_database
## Database class
class SensorsInDatabase(object):
## Constructor
def __init__(self):
# Get the parameters from the config file
self._sensorTable = config_database.database['mysql_sensor_table_name']
self._sensorTypesTable = config_database.database['mysql_sensor_types_table_name']
self._sql = Database()
## Gets all sensors from the database
def get_all_sensors(self):
query = "SELECT * FROM %s" % self._sensorTable
return self._sql.execute_and_return(query)
## Gets all sensor types from the database
def get_all_sensor_types(self):
query = "SELECT * FROM %s" % self._sensorTypesTable
return self._sql.execute_and_return(query)
## Updates sensor value in the database
def update_sensor_value(self, sensor):
return self._sql.update_sensor_value(sensor)
## Database class
class Database(object):
## Constructor
def __init__(self, hostname=None, username=None, password=None, database=None):
self._host = hostname or config_database.database['mysql_server']
self._pass = password or config_database.database['mysql_password']
self._user = username or config_database.database['mysql_user']
self._db_url = database or config_database.database['mysql_db']
self._db = MySQLdb.connect(self._host, self._user, self._pass, self._db_url)
## Executes query on database
def execute(self, query):
cursor = self._db.cursor()
cursor.execute(query)
self._db.commit()
## Executes query and returns results
def execute_and_return(self, query):
cursor = self._db.cursor(MySQLdb.cursors.DictCursor)
cursor.execute(query)
return cursor.fetchall()
## Updates sensor value in the database
def update_sensor_value(self, sensor):
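        # Note: values are interpolated directly into the SQL string here; for
        # untrusted input, MySQLdb's parameterized form
        # cursor.execute(query, params) would be the safer choice.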
query = "UPDATE Sensor SET value='%s', interpreted_value='%s', last_interpreted_value='%s', last_updated='%s' WHERE id='%s' " % (
sensor['value'],
sensor['interpreted_value'],
sensor['last_interpreted_value'],
sensor['last_updated'],
sensor['id'])
try:
self.execute(query)
return True
except Exception as e:
print "[ERROR] - exception raised while updating sensor information in database: %s" % e
return False
| robertjacobs/zuros | zuros_sequencer/zuros_zwave_poller/src/include/database.py | Python | mit | 2,346 |
# -*- coding: utf-8 -*-
"""
Created on Tue May 23 17:11:23 2017
@author: Jak
"""
import math
import mathutils
import bgeDisp
def makeLeg(L1, L2, L3, resting1, resting2, resting3, label="%s", origin=mathutils.Vector([0, 0, 0])):
"""
make a leg like:
/
/
/
*
\
\
\
___\
resting 1 is wrt to horizontal plane
other angles are wrt above link
"""
tibiaRot = mathutils.Euler((resting1, 0, 0)).to_matrix()
tibiaOrigin = origin
    tibia = bgeDisp.makeLink(L1, tibiaOrigin, tibiaRot.to_quaternion(), label % ("tibia",))
femurRot = mathutils.Euler((resting2, 0, 0)).to_matrix() * tibiaRot
femurOriginOffset = mathutils.Vector([0, 0, L1])
femurOriginOffset.rotate(tibiaRot)
femurOrigin = tibiaOrigin + femurOriginOffset
femur = bgeDisp.makeLink(L2, femurOrigin, femurRot.to_quaternion(), label % ("femur",))
ankleRot = mathutils.Euler((resting3, 0, 0)).to_matrix()*femurRot
ankleOriginOffset = mathutils.Vector([0, 0, L2])
ankleOriginOffset.rotate(femurRot)
ankleOrigin = femurOrigin+ankleOriginOffset
ankle = bgeDisp.makeLink(L3, ankleOrigin, ankleRot.to_quaternion(), label % ("ankle",))
legLengths = [5, 7, 2]
legRestingAngles = [math.radians(x) for x in [120, 110, 40]]
W = 5
D = 30
origin_z = 15
legs = []
nameKeys = {}
nameKeys[0, 0] = "backLeft"
nameKeys[0, 1] = "frontLeft"
nameKeys[1, 0] = "backRight"
nameKeys[1, 1] = "frontRight"
for w in range(2):
for d in range(2):
        origin = mathutils.Vector([(W/2)*(-1)**w, (D/2)*(-1)**d, origin_z])
name = nameKeys[w, d] + "_%s"
legs.append(makeLeg(*legLengths, *legRestingAngles, name, origin))
#legs = [makeLeg(*legLengths, *legRestingAngles, str(x) + "%s", mathutils.Vector([5*(-1)**x, 0, 0])) for x in range(2)]
#makeLeg(5, 7, 2, math.radians(120), math.radians(110), math.radians(40)) | Jak-o-Shadows/flexspine | simulation/makeRobot.py | Python | mit | 2,050 |
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from setuptools import setup
sys.path.append(os.path.join('doc', 'common'))
try:
from doctools import build_doc, test_doc
except ImportError:
build_doc = test_doc = None
from distutils.cmd import Command
class import_cldr(Command):
description = 'imports and converts the CLDR data'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
c = subprocess.Popen([sys.executable, 'scripts/download_import_cldr.py'])
c.wait()
setup(
name='Babel',
version='2.0-dev',
description='Internationalization utilities',
long_description=\
"""A collection of tools for internationalizing Python applications.""",
author='Armin Ronacher',
author_email='[email protected]',
license='BSD',
url='http://babel.pocoo.org/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['babel', 'babel.messages', 'babel.localtime'],
package_data={'babel': ['global.dat', 'localedata/*.dat']},
install_requires=[
# This version identifier is currently necessary as
# pytz otherwise does not install on pip 1.4 or
# higher.
'pytz>=0a',
],
cmdclass={'build_doc': build_doc, 'test_doc': test_doc,
'import_cldr': import_cldr},
zip_safe=False,
# Note when adding extractors: builtin extractors we also want to
# work if packages are not installed to simplify testing. If you
# add an extractor here also manually add it to the "extract"
# function in babel.messages.extract.
entry_points="""
[console_scripts]
pybabel = babel.messages.frontend:main
[distutils.commands]
compile_catalog = babel.messages.frontend:compile_catalog
extract_messages = babel.messages.frontend:extract_messages
init_catalog = babel.messages.frontend:init_catalog
update_catalog = babel.messages.frontend:update_catalog
[distutils.setup_keywords]
message_extractors = babel.messages.frontend:check_message_extractors
[babel.checkers]
num_plurals = babel.messages.checkers:num_plurals
python_format = babel.messages.checkers:python_format
[babel.extractors]
ignore = babel.messages.extract:extract_nothing
python = babel.messages.extract:extract_python
javascript = babel.messages.extract:extract_javascript
"""
)
| regisb/babel | setup.py | Python | bsd-3-clause | 2,969 |
DEBUG = True
TEMPLATE_DEBUG = True
API_LIMIT_PER_PAGE = 10000
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'local.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
| timothypage/etor | etor/etor/local_settings.py | Python | mit | 212 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.hypervisors.tables import \
AdminHypervisorsTable
LOG = logging.getLogger(__name__)
class AdminIndexView(tables.DataTableView):
table_class = AdminHypervisorsTable
template_name = 'admin/hypervisors/index.html'
def get_data(self):
hypervisors = []
try:
hypervisors = api.nova.hypervisor_list(self.request)
        except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor list.'))
return hypervisors
| tuskar/tuskar-ui | openstack_dashboard/dashboards/admin/hypervisors/views.py | Python | apache-2.0 | 1,366 |
#!/usr/bin/env python2.7
#
# Copyright 2019 OpenGEE Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Abstraction script for retrieving system information so that other code and
executables can have a platform-independent way of retrieving needed info.
New and existing code should be made to pull information from here so that
it is retrieved in a consistent manner with fewer code changes needed
elsewhere.
"""
import sys
import os
import socket
def GetHostName(useFqdn=False):
"""Returns the host name, and with FQDN if specified."""
thisHost = "localhost"
if useFqdn:
thisHost = socket.getfqdn()
# If the server is configured correctly getfqdn() works fine, but it might
# take what's in /etc/hosts first, so double check to see if
# hostname is more appropriate to avoid using localhost or something else
if thisHost == "localhost" or thisHost == "127.0.0.1" or thisHost == "127.0.1.1" or "." not in thisHost:
thisHost = socket.gethostname()
else:
# hostname itself could be set to FQDN too, so it could be the same as above
thisHost = socket.gethostname()
return thisHost
def Usage(cmd):
"""Show proper usage for this command."""
print "usage: %s [hostname [-f]]" % cmd
exit(1)
def main(args):
if len(args) < 2:
Usage(args[0])
if args[1] == "hostname":
useFqdn = False
if len(args) > 2:
useFqdn = args[2] == "-f"
print GetHostName(useFqdn)
else:
Usage(args[0])
if __name__ == "__main__":
main(sys.argv)
| tst-eclamar/earthenterprise | earth_enterprise/src/server/wsgi/common/geAbstractionFetcher.py | Python | apache-2.0 | 2,044 |
# Part of Patient Flow.
# See LICENSE file for full copyright and licensing details.
import test_shift_management
| NeovaHealth/patientflow | nh_shift_management/tests/__init__.py | Python | agpl-3.0 | 114 |
# flake8: noqa
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PrivacyLevelTranslation'
db.create_table(u'privacy_privacylevel_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['privacy.PrivacyLevel'])),
))
db.send_create_signal(u'privacy', ['PrivacyLevelTranslation'])
# Adding unique constraint on 'PrivacyLevelTranslation', fields ['language_code', 'master']
db.create_unique(u'privacy_privacylevel_translation', ['language_code', 'master_id'])
# Adding model 'PrivacyLevel'
db.create_table(u'privacy_privacylevel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('clearance_level', self.gf('django.db.models.fields.IntegerField')(unique=True)),
))
db.send_create_signal(u'privacy', ['PrivacyLevel'])
# Adding model 'PrivacySetting'
db.create_table(u'privacy_privacysetting', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['privacy.PrivacyLevel'])),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
))
db.send_create_signal(u'privacy', ['PrivacySetting'])
def backwards(self, orm):
# Removing unique constraint on 'PrivacyLevelTranslation', fields ['language_code', 'master']
db.delete_unique(u'privacy_privacylevel_translation', ['language_code', 'master_id'])
# Deleting model 'PrivacyLevelTranslation'
db.delete_table(u'privacy_privacylevel_translation')
# Deleting model 'PrivacyLevel'
db.delete_table(u'privacy_privacylevel')
# Deleting model 'PrivacySetting'
db.delete_table(u'privacy_privacysetting')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'privacy.privacylevel': {
'Meta': {'ordering': "['clearance_level']", 'object_name': 'PrivacyLevel'},
'clearance_level': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'privacy.privacyleveltranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'PrivacyLevelTranslation', 'db_table': "u'privacy_privacylevel_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['privacy.PrivacyLevel']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'privacy.privacysetting': {
'Meta': {'object_name': 'PrivacySetting'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['privacy.PrivacyLevel']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['privacy'] | bitmazk/django-privacy | privacy/migrations/0001_initial.py | Python | mit | 4,822 |
"""
Numerical matrix multiplication for path integrals.
"""
from itertools import product
import numpy as np
from .constants import HBAR
from .tools import cached
class PIMM:
"""
Path Integrals via Matrix Multiplication
Base class for various kinds of path integral implementations.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1'):
"""
Note:
When pot_f receives an N-dimensional array as input, it needs to map
over it, returning an (N-1)-dimensional array.
Note:
The "particles" are actually any Cartesian degrees of freedom. One
might have the same configuration (masses and grids) for a
3-dimensional 1-particle system as for a 1-dimensional 3-particle
system. Of course, the coordinate arrays must be interpreted
appropriately in each case (whether by the potential function or by
the user of the output density).
Parameters:
masses: Masses of the particles.
grid_ranges: Where the grids are truncated. Each grid is symmetric
about the origin.
grid_lens: How many points are on the grids.
beta: Propagation length of the entire path.
num_links: Number of links in the entire path.
pot_f: Potential experienced by the particles in some spatial
configuration.
"""
assert len(masses) == len(grid_ranges) == len(grid_lens), \
'Numbers of configuration items must match.'
assert all(m > 0 for m in masses), 'Masses must be positive.'
assert all(gr > 0 for gr in grid_ranges), 'Grids must have positive lengths.'
assert all(gl >= 2 for gl in grid_lens), 'Grids must have at least two points.'
assert beta > 0, 'Beta must be positive.'
assert num_links >= 2, 'Must have at least two links.'
self._masses = np.array(masses)
self._grid_ranges = np.array(grid_ranges)
self._grid_lens = np.array(grid_lens)
self._pot_f = pot_f
self._beta = beta
self._num_links = num_links
# For cached decorator.
self._cached = {}
@property
def masses(self) -> '[g/mol]':
return self._masses
@property
def grid_ranges(self) -> '[nm]':
return self._grid_ranges
@property
def grid_lens(self) -> '[1]':
return self._grid_lens
@property
def pot_f(self) -> '[nm] -> kJ/mol':
return self._pot_f
@property
def beta(self) -> 'mol/kJ':
return self._beta
@property
def num_links(self) -> '1':
return self._num_links
@property
@cached
def tau(self) -> 'mol/kJ':
"""
High-temperature propagator length.
"""
return self.beta / self.num_links
@property
@cached
def num_points(self) -> '1':
"""
Number of points in the coordinate vector.
"""
return np.prod(self.grid_lens)
@property
@cached
def grid(self) -> '[[nm]]':
"""
Vector of the positions corresponding to the grid points.
This is not a vector in the sense of a 1-dimensional array, because
each element is itself a vector of coordinates for each particle.
However, it can be thought of as the tensor product of the
1-dimensional position vectors.
"""
grids = [np.linspace(-gr, gr, gl) for (gr, gl) in zip(self.grid_ranges, self.grid_lens)]
result = np.array(list(product(*grids)))
assert result.shape == (self.num_points, len(self.masses))
return result
@property
@cached
def volume_element(self) -> 'nm^N':
"""
Effective volume taken up by each grid point.
"""
return np.prod(2 * self.grid_ranges / (self.grid_lens - 1))
@property
@cached
def pot_f_grid(self) -> '[kJ/mol]':
"""
Potential function evaluated on the grid.
"""
return self.pot_f(self.grid)
@property
@cached
def rho_tau(self) -> '[[1/nm^N]]':
"""
Matrix for the high-temperature propagator.
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
prefactor_V = self.tau / 2 # mol/kJ
prefactor_front = np.sqrt(np.prod(prefactors_K) / np.pi) # 1/nm^N
K = np.empty((self.num_points, self.num_points)) # [[nm^2]]
V = np.empty_like(K) # [[kJ/mol]]
for i, q_i in enumerate(self.grid):
for j, q_j in enumerate(self.grid):
K[i, j] = np.sum(prefactors_K * (q_i - q_j) ** 2)
V[i, j] = self.pot_f_grid[i] + self.pot_f_grid[j]
return prefactor_front * np.exp(-K - prefactor_V * V)
@property
def density_diagonal(self):
raise NotImplementedError()
def expectation_value(self, property_f: '[nm] -> X') -> 'X':
"""
Expectation value of property_f.
Note:
This is only implemented for properties that are diagonal in the
position representation.
Note:
When property_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
"""
return np.dot(self.density_diagonal, property_f(self.grid))
class PIFTMM(PIMM):
"""
Path Integral at Finite Temperature via Matrix Multiplication
Calculate the approximate thermal density matrix of a system comprised of
one or more particles in an arbitrary potential on a discretized and
truncated grid. The density matrix is determined via numerical matrix
multiplication of high-temperature matrices.
"""
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
power = self.num_links - 1
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized thermal density matrix.
"""
density = self.rho_beta
# Explicitly normalize.
density /= density.diagonal().sum()
return density
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized thermal diagonal density.
"""
return self.density.diagonal()
class PIGSMM(PIMM):
"""
Path Integral Ground State via Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using numerical matrix multiplication.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1', *,
trial_f: '[nm] -> 1' = None,
trial_f_diffs: '[[nm] -> 1/nm^2]' = None):
"""
See PIMM.__init__ for more details.
Note:
The convention used is that beta represents the entire path, so the
propagation length from the trial function to the middle of the path
is beta/2.
Note:
When trial_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
Parameters:
trial_f: Approximation to the ground state wavefunction. If none is
provided, a uniform trial function is used.
trial_f_diffs: Second derivatives of trial_f. One function must be
specified for each particle.
"""
super().__init__(masses, grid_ranges, grid_lens, pot_f, beta, num_links)
assert num_links % 2 == 0, 'Number of links must be even.'
if trial_f is not None:
assert trial_f_diffs is not None, 'Derivatives must be provided.'
assert len(trial_f_diffs) == len(masses), 'Number of derivatives must match.'
self._trial_f = trial_f
self._trial_f_diffs = trial_f_diffs
@property
def trial_f(self) -> '[nm] -> 1':
return self._trial_f
@property
def trial_f_diffs(self) -> '[[nm] -> 1/nm^2]':
return self._trial_f_diffs
@property
@cached
def uniform_trial_f_grid(self) -> '[1]':
"""
Unnormalized uniform trial function evaluated on the grid.
"""
return np.ones(self.num_points)
@property
@cached
def trial_f_grid(self) -> '[1]':
"""
Unnormalized trial function evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_grid
return self.trial_f(self.grid)
@property
@cached
def uniform_trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized uniform trial function derivatives evaluated on the grid.
"""
return np.zeros(self.grid.T.shape)
@property
@cached
def trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized trial function derivatives evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_diffs_grid
result = np.empty(self.grid.T.shape)
for i, f in enumerate(self.trial_f_diffs):
result[i] = f(self.grid)
return result
@property
@cached
def rho_beta_half(self) -> '[[1/nm^N]]':
"""
Matrix for the half path propagator.
"""
power = self.num_links // 2
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
return self.volume_element * np.dot(self.rho_beta_half, self.rho_beta_half)
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
ground_wf = np.dot(self.rho_beta_half, self.trial_f_grid)
# Explicitly normalize.
ground_wf /= np.sqrt(np.sum(ground_wf ** 2))
return ground_wf
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized ground state density matrix.
"""
return np.outer(self.ground_wf, self.ground_wf)
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized ground state diagonal density.
"""
return self.ground_wf ** 2
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = np.dot(self.rho_beta, self.trial_f_grid) # [1/nm^N]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol nm^N
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^(N+2)
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1/nm^N
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
@property
@cached
def density_reduced(self) -> '[[1]]':
"""
Density matrix for the first particle, with the other traced out.
Only implemented for two-particle systems.
"""
assert len(self.masses) == 2
new_len = self.grid_lens[0]
other_len = self.grid_lens[1]
density_new = np.zeros((new_len, new_len))
for i in range(new_len):
for j in range(new_len):
for t in range(other_len):
# Avoid computing self.density here.
density_new[i, j] += self.ground_wf[other_len * i + t] * self.ground_wf[other_len * j + t]
return density_new
@property
@cached
def trace_renyi2(self) -> '1':
"""
Trace of the square of the reduced density matrix.
The 2nd Rényi entropy is the negative logarithm of this quantity.
"""
return np.linalg.matrix_power(self.density_reduced, 2).trace()
class PIGSIMM(PIGSMM):
"""
Path Integral Ground State via Implicit Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using implicit numerical matrix-vector
multiplication, where the full density matrix is never constructed.
"""
@property
def rho_tau(self):
# We don't build any (full) matrices!
raise NotImplementedError()
@property
def rho_beta_half(self):
raise NotImplementedError()
@property
def rho_beta(self):
raise NotImplementedError()
def _propagate_trial(self, start_grid: '[1]', power: '1') -> '[1]':
"""
Multiply start_grid by (rho_tau ** power).
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
pot_exp = np.exp(-0.5 * self.tau * self.pot_f_grid) # [1]
temp_wf1 = start_grid.copy() # [1]
temp_wf2 = np.zeros_like(temp_wf1) # [1]
for _ in range(power):
temp_wf1 *= pot_exp
for q, wf in zip(self.grid, temp_wf1):
# The temporary array here is the same shape as self.grid.
temp_wf2 += np.exp(-np.sum(prefactors_K * (self.grid - q) ** 2, axis=1)) * wf
temp_wf2 *= pot_exp
# Explicitly normalize at each step for stability.
temp_wf1 = temp_wf2 / np.sqrt(np.sum(temp_wf2 ** 2))
temp_wf2 = np.zeros_like(temp_wf1)
return temp_wf1
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
return self._propagate_trial(self.trial_f_grid, self.num_links // 2)
@property
def density(self):
raise NotImplementedError()
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = self._propagate_trial(self.ground_wf, self.num_links // 2) # [1]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^2
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
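

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library): ground state of a single
# particle in a harmonic well via PIGSMM. All numerical values below are
# illustrative assumptions, not recommended parameters.
if __name__ == '__main__':
    def _harmonic(q: '[nm]') -> 'kJ/mol':
        # Maps over the grid: q has shape (num_points, num_particles).
        return 0.5 * 1.0e3 * np.sum(q ** 2, axis=-1)

    _pigs = PIGSMM([1.0], [1.0], [64], _harmonic, beta=8.0e-3, num_links=64)
    print('Mixed-estimator energy (kJ/mol):', _pigs.energy_mixed)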
| 0/pathintmatmult | pathintmatmult/nmm.py | Python | mit | 15,390 |
# Natural Language Toolkit: Sequential Backoff Taggers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (minor additions)
# Tiago Tresoldi <[email protected]> (original affix tagger)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes for tagging sentences sequentially, left to right. The
abstract base class L{SequentialBackoffTagger} serves as the base
class for all the taggers in this module. Tagging of individual words
is performed by the method L{choose_tag()}, which is defined by
subclasses of L{SequentialBackoffTagger}. If a tagger is unable to
determine a tag for the specified token, then its I{backoff tagger} is
consulted instead. Any C{SequentialBackoffTagger} may serve as a
backoff tagger for any other C{SequentialBackoffTagger}.
"""
import re, yaml
from nltk import FreqDist, ConditionalFreqDist
from nltk.tag.api import *
from nltk.tag.util import *
from nltk.internals import deprecated, Deprecated
######################################################################
#{ Abstract Base Classes
######################################################################
class SequentialBackoffTagger(TaggerI):
"""
    An abstract base class for taggers that tag words sequentially,
left to right. Tagging of individual words is performed by the
method L{choose_tag()}, which should be defined by subclasses. If
a tagger is unable to determine a tag for the specified token,
then its backoff tagger is consulted.
@ivar _taggers: A list of all the taggers that should be tried to
tag a token (i.e., C{self} and its backoff taggers).
"""
def __init__(self, backoff=None):
if backoff is None:
self._taggers = [self]
else:
self._taggers = [self] + backoff._taggers
def _get_backoff(self):
if len(self._taggers) < 2: return None
else: return self._taggers[1]
backoff = property(_get_backoff, doc='''
The backoff tagger for this tagger.''')
def tag(self, tokens):
# docs inherited from TaggerI
tags = []
for i in range(len(tokens)):
tags.append(self.tag_one(tokens, i, tags))
return zip(tokens, tags)
def tag_one(self, tokens, index, history):
"""
Determine an appropriate tag for the specified token, and
return that tag. If this tagger is unable to determine a tag
for the specified token, then its backoff tagger is consulted.
@rtype: C{str}
@type tokens: C{list}
@param tokens: The list of words that are being tagged.
@type index: C{int}
@param index: The index of the word whose tag should be
returned.
@type history: C{list} of C{str}
@param history: A list of the tags for all words before
C{index}.
"""
tag = None
for tagger in self._taggers:
tag = tagger.choose_tag(tokens, index, history)
if tag is not None: break
return tag
def choose_tag(self, tokens, index, history):
"""
Decide which tag should be used for the specified token, and
return that tag. If this tagger is unable to determine a tag
for the specified token, return C{None} -- do I{not} consult
the backoff tagger. This method should be overridden by
subclasses of C{SequentialBackoffTagger}.
@rtype: C{str}
@type tokens: C{list}
@param tokens: The list of words that are being tagged.
@type index: C{int}
@param index: The index of the word whose tag should be
returned.
@type history: C{list} of C{str}
@param history: A list of the tags for all words before
C{index}.
"""
raise AssertionError('SequentialBackoffTagger is an abstract class')
#////////////////////////////////////////////////////////////
#{ Deprecated
@deprecated('Use batch_tag instead.')
def tag_sents(self, sents, verbose=False):
        return self.batch_tag(sents, verbose)
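# Illustrative backoff chain (sketch; `tagged_sents` stands for a hypothetical
# training corpus of [(word, tag), ...] sentences):
#
#     t0 = DefaultTagger('NN')
#     t1 = UnigramTagger(train=tagged_sents, backoff=t0)
#     t2 = BigramTagger(train=tagged_sents, backoff=t1)
#     t2.tag(['the', 'dog', 'barked'])
#
# Each tagger defers to its backoff whenever its choose_tag() returns None.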
class ContextTagger(SequentialBackoffTagger):
"""
An abstract base class for sequential backoff taggers that choose
a tag for a token based on the value of its "context". Different
subclasses are used to define different contexts.
A C{ContextTagger} chooses the tag for a token by calculating the
token's context, and looking up the corresponding tag in a table.
This table can be constructed manually; or it can be automatically
constructed based on a training corpus, using the L{train()}
factory method.
"""
def __init__(self, context_to_tag, backoff=None):
"""
@param context_to_tag: A dictionary mapping contexts to tags.
@param backoff: The backoff tagger that should be used for this tagger.
"""
SequentialBackoffTagger.__init__(self, backoff)
if context_to_tag:
self._context_to_tag = context_to_tag
else:
self._context_to_tag = {}
def context(self, tokens, index, history):
"""
@return: the context that should be used to look up the tag
for the specified token; or C{None} if the specified token
should not be handled by this tagger.
@rtype: (hashable)
"""
raise AssertionError('Abstract base class')
def choose_tag(self, tokens, index, history):
context = self.context(tokens, index, history)
return self._context_to_tag.get(context)
def size(self):
"""
@return: The number of entries in the table used by this
tagger to map from contexts to tags.
"""
return len(self._context_to_tag)
def __repr__(self):
return '<%s: size=%d>' % (self.__class__.__name__, self.size())
def _train(self, tagged_corpus, cutoff=1, verbose=False):
"""
Initialize this C{ContextTagger}'s L{_context_to_tag} table
based on the given training data. In particular, for each
context C{I{c}} in the training data, set
C{_context_to_tag[I{c}]} to the most frequent tag for that
context. However, exclude any contexts that are already
tagged perfectly by the backoff tagger(s).
The old value of C{self._context_to_tag} (if any) is discarded.
@param tagged_corpus: A tagged corpus. Each item should be
a C{list} of C{(word, tag)} tuples.
        @param cutoff: If the most likely tag for a context occurs
            C{cutoff} times or fewer, then exclude it from the
context-to-tag table for the new tagger.
"""
token_count = hit_count = 0
# A context is considered 'useful' if it's not already tagged
# perfectly by the backoff tagger.
useful_contexts = set()
# Count how many times each tag occurs in each context.
fd = ConditionalFreqDist()
for sentence in tagged_corpus:
tokens, tags = zip(*sentence)
for index, (token, tag) in enumerate(sentence):
# Record the event.
token_count += 1
context = self.context(tokens, index, tags[:index])
if context is None: continue
fd[context].inc(tag)
# If the backoff got it wrong, this context is useful:
if (self.backoff is None or
tag != self.backoff.tag_one(tokens, index, tags[:index])):
useful_contexts.add(context)
# Build the context_to_tag table -- for each context, figure
# out what the most likely tag is. Only include contexts that
# we've seen at least `cutoff` times.
for context in useful_contexts:
best_tag = fd[context].max()
hits = fd[context][best_tag]
if hits > cutoff:
self._context_to_tag[context] = best_tag
hit_count += hits
# Display some stats, if requested.
if verbose:
size = len(self._context_to_tag)
backoff = 100 - (hit_count * 100.0)/ token_count
pruning = 100 - (size * 100.0) / len(fd.conditions())
print "[Trained Unigram tagger:",
print "size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
size, backoff, pruning)
######################################################################
#{ Tagger Classes
######################################################################
class DefaultTagger(SequentialBackoffTagger, yaml.YAMLObject):
"""
A tagger that assigns the same tag to every token.
"""
yaml_tag = '!nltk.DefaultTagger'
def __init__(self, tag):
"""
Construct a new tagger that assigns C{tag} to all tokens.
"""
self._tag = tag
SequentialBackoffTagger.__init__(self, None)
def choose_tag(self, tokens, index, history):
return self._tag # ignore token and history
def __repr__(self):
return '<DefaultTagger: tag=%s>' % self._tag
class NgramTagger(ContextTagger, yaml.YAMLObject):
"""
A tagger that chooses a token's tag based on its word string and
    on the preceding I{n-1} words' tags. In particular, a tuple
    C{(tags[i-n+1:i], words[i])} is looked up in a table, and the
    corresponding tag is returned. N-gram taggers are typically
    trained on a tagged corpus.
"""
yaml_tag = '!nltk.NgramTagger'
def __init__(self, n, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
"""
Train a new C{NgramTagger} using the given training data or
the supplied model. In particular, construct a new tagger
whose table maps from each context C{(tag[i-n:i-1], word[i])}
to the most frequent tag for that context. But exclude any
contexts that are already tagged perfectly by the backoff
tagger.
@param train: A tagged corpus. Each item should be
a C{list} of C{(word, tag)} tuples.
@param backoff: A backoff tagger, to be used by the new
tagger if it encounters an unknown context.
@param cutoff: If the most likely tag for a context occurs
            C{cutoff} times or fewer, then exclude it from the
context-to-tag table for the new tagger.
"""
self._n = n
if (train and model) or (not train and not model):
raise ValueError('Must specify either training data or trained model.')
ContextTagger.__init__(self, model, backoff)
if train:
self._train(train, cutoff, verbose)
def context(self, tokens, index, history):
tag_context = tuple(history[max(0,index-self._n+1):index])
return (tag_context, tokens[index])
class UnigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string.
Unigram taggers are typically trained on a tagged corpus.
"""
yaml_tag = '!nltk.UnigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 1, train, model, backoff, cutoff, verbose)
def context(self, tokens, index, history):
return tokens[index]
class BigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string and on
    the preceding word's tag. In particular, a tuple consisting
of the previous tag and the word is looked up in a table, and
the corresponding tag is returned. Bigram taggers are typically
trained on a tagged corpus.
"""
yaml_tag = '!nltk.BigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 2, train, model, backoff, cutoff, verbose)
class TrigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string and on
    the preceding two words' tags. In particular, a tuple consisting
    of the previous two tags and the word is looked up in a table, and
    the corresponding tag is returned. Trigram taggers are typically
    trained on a tagged corpus.
"""
yaml_tag = '!nltk.TrigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 3, train, model, backoff, cutoff, verbose)
class AffixTagger(ContextTagger, yaml.YAMLObject):
"""
A tagger that chooses a token's tag based on a leading or trailing
substring of its word string. (It is important to note that these
substrings are not necessarily "true" morphological affixes). In
particular, a fixed-length substring of the word is looked up in a
table, and the corresponding tag is returned. Affix taggers are
    typically constructed by training them on a tagged corpus; see
L{train()}.
"""
yaml_tag = '!nltk.AffixTagger'
def __init__(self, train=None, model=None, affix_length=-3,
min_stem_length=2, backoff=None, cutoff=1, verbose=False):
"""
Construct a new affix tagger.
@param affix_length: The length of the affixes that should be
considered during training and tagging. Use negative
numbers for suffixes.
@param min_stem_length: Any words whose length is less than
C{min_stem_length+abs(affix_length)} will be assigned a
tag of C{None} by this tagger.
"""
if (train and model) or (not train and not model):
raise ValueError('Must specify either training data or '
'trained model')
ContextTagger.__init__(self, model, backoff)
self._affix_length = affix_length
self._min_word_length = min_stem_length + abs(affix_length)
if train:
self._train(train, cutoff, verbose)
def context(self, tokens, index, history):
token = tokens[index]
if len(token) < self._min_word_length:
return None
elif self._affix_length > 0:
return token[:self._affix_length]
else:
return token[self._affix_length:]
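# For example (illustrative): AffixTagger(train=tagged_sents, affix_length=-3)
# keys its table on each word's final three letters ('running' -> 'ing');
# words shorter than min_stem_length + 3 yield no context, deferring to the
# backoff tagger (or None).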
class RegexpTagger(SequentialBackoffTagger, yaml.YAMLObject):
"""
A tagger that assigns tags to words based on regular expressions
over word strings.
"""
yaml_tag = '!nltk.RegexpTagger'
def __init__(self, regexps, backoff=None):
"""
Construct a new regexp tagger.
@type regexps: C{list} of C{(str, str)}
@param regexps: A list of C{(regexp, tag)} pairs, each of
which indicates that a word matching C{regexp} should
            be tagged with C{tag}. The pairs will be evaluated in
            order. If none of the regexps match a word, then the
            optional backoff tagger is consulted; if there is no
            backoff tagger, the word is assigned the tag C{None}.
"""
self._regexps = regexps
SequentialBackoffTagger.__init__(self, backoff)
def choose_tag(self, tokens, index, history):
for regexp, tag in self._regexps:
if re.match(regexp, tokens[index]): # ignore history
return tag
return None
def __repr__(self):
return '<Regexp Tagger: size=%d>' % len(self._regexps)
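# Illustrative pattern list (sketch):
#
#     patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'^-?[0-9]+$', 'CD')]
#     tagger = RegexpTagger(patterns, backoff=DefaultTagger('NN'))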
| hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/tag/sequential.py | Python | apache-2.0 | 15,593 |
import unittest
from pydl.hyperopt.components import *
class ComponentsTestCase(unittest.TestCase):
def test_hp_choice(self):
self.assertRaises(AssertionError, hp_choice, 1)
self.assertRaises(AssertionError, hp_choice, [])
x = hp_choice([1, 2, 3])
self.assertEqual(x.size, 1)
self.assertEqual(x.get_value([0.5]), 2)
a = hp_int(1, 4)
b = hp_int(1, 4)
x = hp_choice([a, b])
self.assertEqual(x.size, 2)
self.assertEqual(x.get_value([0, 0]), 1)
x = hp_choice([hp_choice([a, b]), a, 'lala'])
self.assertEqual(x.size, 3)
self.assertEqual(x.get_value([1, 1, 0]), 'lala')
def test_hp_int(self):
min_v = 1
max_v = 10
self.assertRaises(AssertionError, hp_int, max_v, min_v)
p = hp_int(min_v, max_v)
self.assertEqual(p.size, 1)
self.assertEqual(min_v, p.get_value([0]))
self.assertEqual(max_v, p.get_value([1]))
self.assertEqual(5, p.get_value([0.5]))
def test_hp_float(self):
min_v = 0
max_v = 1
self.assertRaises(AssertionError, hp_float, max_v, min_v)
p = hp_float(min_v, max_v)
self.assertEqual(p.size, 1)
self.assertEqual(min_v, p.get_value([0]))
self.assertEqual(max_v, p.get_value([1]))
self.assertEqual(0.5, p.get_value([0.5]))
def test_hp_space(self):
self.assertRaises(AssertionError, hp_space)
space = hp_space(a=1, b=2)
self.assertEqual(space.size, 0)
expected = {
'a': 1,
'b': 2
}
self.assertDictEqual(expected, space.get_value([0, 0.5, 0, 1]))
def test_hp_space_from_json(self):
hp_space_json = {
"class_name": "MLP",
"config": {
"name": "mlp_opt",
"layers": {
"class_name": "ListNode",
"config": {
"value": [
{
"class_name": "IntParameterNode",
"config": {
"min_val": 10,
"max_val": 100
}
},
{
"class_name": "IntParameterNode",
"config": {
"min_val": 10,
"max_val": 100
}
}
]
}
},
"dropout": {
"class_name": "FloatParameterNode",
"config": {
"min_val": 0,
"max_val": .3
}
}
}
}
space = hp_space_from_json(hp_space_json)
self.assertEqual(space.size, 3)
expected_config = {
'class_name': 'MLP',
'config': {
'dropout': 0.0,
'name': 'mlp_opt',
'layers': [55, 100]
}
}
self.assertDictEqual(expected_config, space.get_value([0, 0.5, 1]))
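        # Illustrative reading: the three components of [0, 0.5, 1] drive the
        # space's three parameter nodes, yielding layers=[55, 100] (midpoint
        # and maximum of 10..100) and dropout=0.0 here.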
def test_hp_space_to_json(self):
expected_json = {
"class_name": "MLP",
"config": {
"name": "mlp_opt",
"layers": {
"class_name": "ListNode",
"config": {
"value": [
{
"class_name": "IntParameterNode",
"config": {
"min_val": 10,
"max_val": 100,
"label": ''
}
},
{
"class_name": "IntParameterNode",
"config": {
"min_val": 10,
"max_val": 100,
"label": ''
}
}
],
"label": "layers"
}
},
"dropout": {
"class_name": "FloatParameterNode",
"config": {
"min_val": 0,
"max_val": .3,
"label": "dropout"
}
}
}
}
space = hp_space_from_json(expected_json)
actual_json = space.to_json()
self.assertDictEqual(actual_json, expected_json)
if __name__ == '__main__':
unittest.main()
| rafaeltg/pydl | pydl/hyperopt/unittests/test_components.py | Python | mit | 4,919 |
import sys
from mrjob.job import MRJob
from mrjob.protocol import JSONProtocol
from mrjob.step import MRStep
from GlobalHeader import *
class ShiftGModMat(MRJob):
"""
Shift generalized modularity matrix
"""
INPUT_PROTOCOL = JSONProtocol
OUTPUT_PROTOCOL = JSONProtocol
def mapper_align_by_col(self, key, value):
row = value[0]
col = value[1]
val = value[2]
yield col, abs(val)
def combiner_sum(self, key, value):
yield key, sum(value)
def reducer_sum(self, key, value):
GroupID, col = key.split("_")
val = sum(value)
yield GroupID, val
def reducer_max(self, key, value):
if key.isdigit():
yield key, max(value)
def steps(self):
return (
[
MRStep(
mapper = self.mapper_align_by_col,
combiner= self.combiner_sum,
reducer = self.reducer_sum,
),
MRStep(
reducer = self.reducer_max,
),
]
)
if __name__ == '__main__':
ShiftGModMat.run()
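# Illustrative invocation (assumes JSON-encoded input lines whose values are
# [row, col, val] triples, with "GroupID_col"-style keys as split in
# reducer_sum above):
#   python ShiftGModMat.py gmod_matrix.json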
| oraclechang/CommunityDetection | ShiftGModMat.py | Python | gpl-2.0 | 1,188 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from itertools import izip
from .basecase import (BaseTestCase, cqlshlog, dedent, at_a_time, cql,
TEST_HOST, TEST_PORT)
from .cassconnect import (get_test_keyspace, testrun_cqlsh, testcall_cqlsh,
cassandra_cursor, split_cql_commands, quote_name)
from .ansi_colors import (ColoredText, lookup_colorcode, lookup_colorname,
lookup_colorletter, ansi_seq)
CONTROL_C = '\x03'
CONTROL_D = '\x04'
class TestCqlshOutput(BaseTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertNoHasColors(self, text, msg=None):
        self.assertNotRegexpMatches(text, ansi_seq, msg=msg or 'ANSI CSI sequence found in %r' % text)
def assertHasColors(self, text, msg=None):
self.assertRegexpMatches(text, ansi_seq, msg=msg)
def assertColored(self, coloredtext, colorname):
wanted_colorcode = lookup_colorcode(colorname)
for num, c in enumerate(coloredtext):
if not c.isspace():
ccolor = c.colorcode()
self.assertEqual(ccolor, wanted_colorcode,
msg='Output text %r (char #%d) is colored %s, not %s'
% (coloredtext, num, lookup_colorname(ccolor), colorname))
def assertColorFromTags(self, coloredtext, tags):
for (char, tag) in izip(coloredtext, tags):
if char.isspace():
continue
if tag.isspace():
tag = 'n' # neutral
self.assertEqual(char.colorcode(), lookup_colorletter(tag),
msg='Coloring mismatch.\nExpected coloring: %s\n'
'Actually got: %s\ncolor code: %s'
% (tags, coloredtext.colored_version(), coloredtext.colortags()))
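    # For example (illustrative), asserting tags 'YY RRRR' against the text
    # 'k1 null' checks that 'k1' is yellow and 'null' is red; whitespace in
    # the text is treated as neutral and skipped.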
def assertCqlverQueriesGiveColoredOutput(self, queries_and_expected_outputs,
cqlver=(), **kwargs):
if not isinstance(cqlver, (tuple, list)):
cqlver = (cqlver,)
for ver in cqlver:
self.assertQueriesGiveColoredOutput(queries_and_expected_outputs, cqlver=ver, **kwargs)
def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs,
sort_results=False, **kwargs):
"""
Allow queries and expected output to be specified in structured tuples,
along with expected color information.
"""
with testrun_cqlsh(tty=True, **kwargs) as c:
for query, expected in queries_and_expected_outputs:
cqlshlog.debug('Testing %r' % (query,))
output = c.cmd_and_response(query).lstrip("\r\n")
c_output = ColoredText(output)
pairs = at_a_time(dedent(expected).split('\n'), 2)
outlines = c_output.splitlines()
if sort_results:
if outlines[1].plain().startswith('---'):
firstpart = outlines[:2]
sortme = outlines[2:]
else:
firstpart = []
sortme = outlines
lastpart = []
while sortme[-1].plain() == '':
lastpart.append(sortme.pop(-1))
outlines = firstpart + sorted(sortme, key=lambda c:c.plain()) + lastpart
for (plain, colorcodes), outputline in zip(pairs, outlines):
self.assertEqual(outputline.plain().rstrip(), plain)
self.assertColorFromTags(outputline, colorcodes)
def test_no_color_output(self):
for termname in ('', 'dumb', 'vt100'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select * from has_value_encoding_errors;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertNoHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertNoHasColors(c.read_to_next_prompt())
def test_no_prompt_or_colors_output(self):
# CQL queries and number of lines expected in output:
queries = (('select * from has_all_types limit 1;', 5),
('select * from has_value_encoding_errors limit 1;', 6))
for termname in ('', 'dumb', 'vt100', 'xterm'):
cqlshlog.debug('TERM=%r' % termname)
            for query, lines_expected in queries:
                output, result = testcall_cqlsh(prompt=None, env={'TERM': termname},
                                                tty=False, input=query + '\n')
output = output.splitlines()
for line in output:
self.assertNoHasColors(line)
self.assertNotRegexpMatches(line, r'^cqlsh\S*>')
self.assertEqual(len(output), lines_expected,
msg='output: %r' % '\n'.join(output))
self.assertEqual(output[0], '')
self.assertNicelyFormattedTableHeader(output[1])
self.assertNicelyFormattedTableRule(output[2])
self.assertNicelyFormattedTableData(output[3])
self.assertEqual(output[4].strip(), '')
def test_color_output(self):
for termname in ('xterm', 'unknown-garbage'):
cqlshlog.debug('TERM=%r' % termname)
with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
c.send('select * from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select * from has_value_encoding_errors;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('select count(*) from has_all_types;\n')
self.assertHasColors(c.read_to_next_prompt())
c.send('totally invalid cql;\n')
self.assertHasColors(c.read_to_next_prompt())
def test_count_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('select count(*) from has_all_types;', """
count
MMMMM
-------
4
G
"""),
('select COUNT(*) FROM empty_table;', """
count
MMMMM
-------
0
G
"""),
('select COUNT(*) FROM empty_composite_table;', """
count
MMMMM
-------
0
G
"""),
('select COUNT(*) FROM twenty_rows_table limit 10;', """
count
MMMMM
-------
10
GG
"""),
('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
count
MMMMM
-------
20
GG
"""),
), cqlver=(2, 3))
# different results cql2/cql3
q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
self.assertQueriesGiveColoredOutput((
(q, """
count
MMMMM
-------
1
GG
"""),
), cqlver=2)
self.assertQueriesGiveColoredOutput((
(q, """
count
MMMMM
-------
20
GG
"""),
), cqlver=3)
def test_dynamic_cf_output(self):
self.assertQueriesGiveColoredOutput((
('select * from dynamic_columns;', """
somekey,1 | 1.2,one point two
nMMMMMMM G MMM YYYYYYYYYYYYY
somekey,2 | 2.3,two point three
MMMMMMM G MMM YYYYYYYYYYYYYYY
somekey,3 | -0.0001,negative ten thousandth | 3.46,three point four six | 99,ninety-nine point oh
MMMMMMM G MMMMMMM YYYYYYYYYYYYYYYYYYYYYYY MMMM YYYYYYYYYYYYYYYYYYYY n MM YYYYYYYYYYYYYYYYYYYY
"""),
('select somekey, 2.3 from dynamic_columns;', """
somekey | 2.3
MMMMMMM MMM
---------+-----------------
1 | null
G RRRR
2 | two point three
G YYYYYYYYYYYYYYY
3 | null
G RRRR
"""),
('select first 1 * from dynamic_columns where somekey = 2;', """
2.3
MMM
-----------------
two point three
YYYYYYYYYYYYYYY
"""),
('select * from dynamic_columns where somekey = 2;', """
somekey | 2.3
MMMMMMM MMM
---------+-----------------
2 | two point three
G YYYYYYYYYYYYYYY
"""),
), cqlver=2, sort_results=True)
def test_static_cf_output(self):
self.assertCqlverQueriesGiveColoredOutput((
("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
a | b
MM MM
----+----
1 | 1
YY YY
13 | 13
YY YY
2 | 2
YY YY
"""),
), cqlver=(2, 3))
self.assertQueriesGiveColoredOutput((
('select * from dynamic_columns;', """
somekey | column1 | value
MMMMMMM MMMMMMM MMMMM
---------+---------+-------------------------
1 | 1.2 | one point two
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
2 | 2.3 | two point three
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 99 | ninety-nine point oh
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | 3.46 | three point four six
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
3 | -0.0001 | negative ten thousandth
GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
"""),
), cqlver=3, sort_results=True)
def test_empty_cf_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('select * from empty_table;', """
"""),
), cqlver=(2, 3))
q = 'select * from has_all_types where num = 999;'
self.assertQueriesGiveColoredOutput((
(q, """
num
MMM
-----
999
GGG
"""),
), cqlver=2)
# same query should show up as empty in cql 3
self.assertQueriesGiveColoredOutput((
(q, """
"""),
), cqlver=3)
def test_columnless_key_output(self):
q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
self.assertQueriesGiveColoredOutput((
(q, """
a
MMMMM
-------
1
YYYYY
2
YYYYY
-9192
YYYYY
"""),
), cqlver=2)
self.assertQueriesGiveColoredOutput((
(q, """
a
M
---
1
Y
2
Y
"""),
), cqlver=3)
def test_numeric_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('''select intcol, bigintcol, varintcol \
from has_all_types \
where num in (0, 1, 2, 3);''', """
intcol | bigintcol | varintcol
MMMMMM MMMMMMMMM MMMMMMMMM
-------------+----------------------+-----------------------------
-12 | 1234567890123456789 | 10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
2147483647 | 9223372036854775807 | 9
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
0 | 0 | 0
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
-2147483648 | -9223372036854775808 | -10000000000000000000000000
GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
"""),
('''select decimalcol, doublecol, floatcol \
from has_all_types \
where num in (0, 1, 2, 3);''', """
decimalcol | doublecol | floatcol
MMMMMMMMMM MMMMMMMMM MMMMMMMM
------------------+-----------+----------
19952.11882 | 1 | -2.1
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
1E-14 | 1e+07 | 1e+05
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
0.0 | 0 | 0
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
10.0000000000000 | -1004.1 | 1e+08
GGGGGGGGGGGGGGGG GGGGGGG GGGGG
"""),
), cqlver=(2, 3))
self.assertQueriesGiveColoredOutput((
('''select * from dynamic_columns where somekey = 3;''', """
somekey | -0.0001 | 3.46 | 99
MMMMMMM MMMMMMM MMMM MM
---------+-------------------------+----------------------+----------------------
3 | negative ten thousandth | three point four six | ninety-nine point oh
G YYYYYYYYYYYYYYYYYYYYYYY YYYYYYYYYYYYYYYYYYYY YYYYYYYYYYYYYYYYYYYY
"""),
), cqlver=2)
def test_timestamp_output(self):
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
--------------------------
2012-05-14 12:53:20+0000
GGGGGGGGGGGGGGGGGGGGGGGG
"""),
), env={'TZ': 'Etc/UTC'})
self.assertQueriesGiveColoredOutput((
('''select timestampcol from has_all_types where num = 0;''', """
timestampcol
MMMMMMMMMMMM
--------------------------
2012-05-14 07:53:20-0500
GGGGGGGGGGGGGGGGGGGGGGGG
"""),
), env={'TZ': 'EST'})
def test_boolean_output(self):
self.assertCqlverQueriesGiveColoredOutput((
('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
num | booleancol
MMM MMMMMMMMMM
-----+------------
0 | True
G GGGGG
1 | True
G GGGGG
2 | False
G GGGGG
3 | False
G GGGGG
"""),
), cqlver=(2, 3))
def test_null_output(self):
# column with metainfo but no values
self.assertCqlverQueriesGiveColoredOutput((
("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
M M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
"""),
), cqlver=(2, 3))
# all-columns, including a metainfo column has no values (cql3)
self.assertQueriesGiveColoredOutput((
("select * from undefined_values_table where k in ('k1', 'k2');", """
k | c | notthere
M M MMMMMMMM
----+----+----------
k1 | c1 | null
YY YY RRRR
k2 | c2 | null
YY YY RRRR
"""),
), cqlver=3)
# all-columns, including a metainfo column has no values (cql2)
self.assertQueriesGiveColoredOutput((
("select * from undefined_values_table where k in ('k1', 'k2');", """
k | c
M M
----+----
k1 | c1
YY YY
k2 | c2
YY YY
"""),
), cqlver=2)
# column not in metainfo, which has no values (invalid query in cql3)
self.assertQueriesGiveColoredOutput((
("select num, fakecol from has_all_types where num in (1, 2);", """
num
MMM
-----
1
GGG
2
GGG
"""),
), cqlver=2)
def test_string_output_ascii(self):
self.assertCqlverQueriesGiveColoredOutput((
("select * from ascii_with_invalid_and_special_chars where k in (0, 1, 2, 3, 4);", r"""
k | val
M MMM
---+-----------------------------------------------
0 | newline:\n
G YYYYYYYYmm
1 | return\rand null\x00!
G YYYYYYmmYYYYYYYYmmmmY
2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
G mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
3 | \xfe\xffbyte order mark
G mmmmmmmmYYYYYYYYYYYYYYY
4 | fake special chars\x00\n
G YYYYYYYYYYYYYYYYYYYYYYYY
"""),
), cqlver=(2, 3))
def test_string_output_utf8(self):
# many of these won't line up visually here, to keep the source code
# here ascii-only. note that some of the special Unicode characters
# here will render as double-width or zero-width in unicode-aware
# terminals, but the color-checking machinery here will still treat
# it as one character, so those won't seem to line up visually either.
self.assertCqlverQueriesGiveColoredOutput((
("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", u"""
k | val
M MMM
---+-------------------------------
0 | Normal string
G YYYYYYYYYYYYY
1 | Text with\\nnewlines\\n
G YYYYYYYYYmmYYYYYYYYmm
2 | Text with embedded \\x01 char
G YYYYYYYYYYYYYYYYYYYmmmmYYYYY
3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
G YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
4 | double wides: \u2f91\u2fa4\u2f9a
G YYYYYYYYYYYYYYYYY
5 | zero width\u200bspace
G YYYYYYYYYYYYYYYY
6 | fake special chars\\x00\\n
G YYYYYYYYYYYYYYYYYYYYYYYY
""".encode('utf-8')),
), cqlver=(2, 3), env={'LANG': 'en_US.UTF-8'})
def test_blob_output(self):
self.assertCqlverQueriesGiveColoredOutput((
("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
num | blobcol
MMM MMMMMMM
-----+----------------------
0 | 0x000102030405fffefd
G mmmmmmmmmmmmmmmmmmmm
1 | 0xffffffffffffffffff
G mmmmmmmmmmmmmmmmmmmm
2 | 0x
G mmmmmmmmmmmmmmmmmmmm
3 | 0x80
G mmmmmmmmmmmmmmmmmmmm
"""),
), cqlver=(2, 3))
def test_colname_decoding_errors(self):
        # Not clear how to achieve this situation in the first place. The
        # validator works pretty well, and we can't change the comparator
# after insertion.
#
# guess we could monkey-patch cqlsh or python-cql source to
# explicitly generate an exception on the deserialization of type X..
pass
def test_colval_decoding_errors(self):
self.assertCqlverQueriesGiveColoredOutput((
("select * from has_value_encoding_errors;", r"""
pkey | utf8col
MMMM MMMMMMM
------+--------------------
A | '\x00\xff\x00\xff'
Y RRRRRRRRRRRRRRRRRR
Failed to decode value '\x00\xff\x00\xff' (for column 'utf8col') as text: 'utf8' codec can't decode byte 0xff in position 1: invalid start byte
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
"""),
), cqlver=(2, 3))
def test_key_decoding_errors(self):
self.assertCqlverQueriesGiveColoredOutput((
("select * from has_key_encoding_errors;", r"""
pkey | col
MMMM MMM
--------------------+----------
'\x00\xff\x02\x8f' | whatever
RRRRRRRRRRRRRRRRRR YYYYYYYY
Failed to decode value '\x00\xff\x02\x8f' (for column 'pkey') as text: 'utf8' codec can't decode byte 0xff in position 1: invalid start byte
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
"""),
), cqlver=(2, 3))
def test_prompt(self):
with testrun_cqlsh(tty=True, keyspace=None, cqlver=2) as c:
self.assertEqual(c.output_header.splitlines()[-1], 'cqlsh> ')
c.send('\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertEqual(output, '\ncqlsh> ')
cmd = "USE '%s';\n" % get_test_keyspace().replace("'", "''")
c.send(cmd)
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertEqual(output, '%scqlsh:%s> ' % (cmd, get_test_keyspace()))
c.send('use system;\n')
output = c.read_to_next_prompt().replace('\r\n', '\n')
self.assertEqual(output, 'use system;\ncqlsh:system> ')
c.send('use NONEXISTENTKEYSPACE;\n')
outputlines = c.read_to_next_prompt().splitlines()
self.assertEqual(outputlines[0], 'use NONEXISTENTKEYSPACE;')
self.assertEqual(outputlines[3], 'cqlsh:system> ')
midline = ColoredText(outputlines[1])
self.assertEqual(midline.plain(),
"Bad Request: Keyspace 'NONEXISTENTKEYSPACE' does not exist")
self.assertColorFromTags(midline,
"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
def check_describe_keyspace(self, fullcqlver):
with testrun_cqlsh(tty=True, cqlver=fullcqlver) as c:
ks = get_test_keyspace()
qks = quote_name(fullcqlver, ks)
for cmd in ('describe keyspace', 'desc keyspace'):
for givename in ('system', '', qks):
for semicolon in ('', ';'):
fullcmd = cmd + (' ' if givename else '') + givename + semicolon
desc = c.cmd_and_response(fullcmd)
self.check_describe_keyspace_output(desc, givename or qks, fullcqlver)
# try to actually execute that last keyspace description, with a
# new keyspace name
new_ks_name = 'COPY_OF_' + ks
copy_desc = desc.replace(ks, new_ks_name)
statements = split_cql_commands(copy_desc, cqlver=fullcqlver)
do_drop = True
with cassandra_cursor(cql_version=fullcqlver) as curs:
try:
for stmt in statements:
cqlshlog.debug('TEST EXEC: %s' % stmt)
try:
curs.execute(stmt)
except cql.ProgrammingError, e:
# expected sometimes under cql3, since some existing
# tables made under cql2 are not recreatable with cql3
for errmsg in ('No definition found that is not part of the PRIMARY KEY',
"mismatched input '<' expecting EOF"):
if errmsg in str(e):
do_drop = False
break
else:
raise
# maybe we should do some checks that the two keyspaces
# match up as much as we expect at this point?
finally:
curs.execute('use system')
if do_drop:
curs.execute('drop keyspace %s' % quote_name(fullcqlver, new_ks_name))
def check_describe_keyspace_output(self, output, qksname, fullcqlver):
expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
r'(?im)^USE \S+;$',
r';\s*$']
if fullcqlver == '3.0.0':
expected_bits.append(r'\breplication = {\n \'class\':')
else:
expected_bits.append(r'\bstrategy_class =')
for expr in expected_bits:
self.assertRegexpMatches(output, expr)
def test_describe_keyspace_output(self):
for v in ('2.0.0', '3.0.0'):
self.check_describe_keyspace(v)
def test_describe_columnfamily_output(self):
# we can change these to regular expressions if/when it makes sense
# to do so; these will likely be subject to lots of adjustments.
table_desc2 = dedent("""
CREATE TABLE has_all_types (
num int PRIMARY KEY,
intcol int,
timestampcol timestamp,
floatcol float,
uuidcol uuid,
bigintcol bigint,
doublecol double,
booleancol boolean,
decimalcol decimal,
asciicol ascii,
blobcol blob,
varcharcol text,
textcol text,
varintcol varint
) WITH
comment='' AND
comparator=text AND
read_repair_chance=0.100000 AND
gc_grace_seconds=864000 AND
default_validation=text AND
min_compaction_threshold=4 AND
max_compaction_threshold=32 AND
replicate_on_write='true' AND
compaction_strategy_class='SizeTieredCompactionStrategy' AND
compression_parameters:sstable_compression='LZ4Compressor';
""")
# note columns are now comparator-ordered instead of original-order.
table_desc3 = dedent("""
CREATE TABLE has_all_types (
num int PRIMARY KEY,
asciicol ascii,
bigintcol bigint,
blobcol blob,
booleancol boolean,
decimalcol decimal,
doublecol double,
floatcol float,
intcol int,
textcol text,
timestampcol timestamp,
uuidcol uuid,
varcharcol text,
varintcol varint
) WITH COMPACT STORAGE AND
bloom_filter_fp_chance=0.010000 AND
caching='KEYS_ONLY' AND
comment='' AND
dclocal_read_repair_chance=0.000000 AND
gc_grace_seconds=864000 AND
read_repair_chance=0.100000 AND
replicate_on_write='true' AND
populate_io_cache_on_flush='false' AND
compaction={'class': 'SizeTieredCompactionStrategy'} AND
compression={'sstable_compression': 'LZ4Compressor'};
""")
for v in ('2.0.0', '3.0.0'):
with testrun_cqlsh(tty=True, cqlver=v) as c:
for cmdword in ('describe table', 'desc columnfamily'):
for semicolon in (';', ''):
output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
self.assertNoHasColors(output)
self.assertEqual(output, {'2.0.0': table_desc2, '3.0.0': table_desc3}[v])
def test_describe_columnfamilies_output(self):
output_re = r'''
\n
Keyspace [ ] (?P<ksname> \S+ ) \n
-----------* \n
(?P<cfnames> .*? )
\n
'''
ks = get_test_keyspace()
with testrun_cqlsh(tty=True, keyspace=None, cqlver=3) as c:
# when not in a keyspace
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
ksnames = []
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '(?xs) ^ ( %s )+ $' % output_re)
for section in re.finditer('(?xs)' + output_re, output):
ksname = section.group('ksname')
ksnames.append(ksname)
cfnames = section.group('cfnames')
self.assertNotIn('\n\n', cfnames)
if ksname == ks:
self.assertIn('ascii_with_invalid_and_special_chars', cfnames)
self.assertIn('system', ksnames)
self.assertIn(quote_name('3.0.0', ks), ksnames)
# when in a keyspace
c.send('USE %s;\n' % quote_name('3.0.0', ks))
c.read_to_next_prompt()
for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
for semicolon in (';', ''):
output = c.cmd_and_response(cmdword + semicolon)
self.assertNoHasColors(output)
self.assertEqual(output[0], '\n')
self.assertEqual(output[-1], '\n')
self.assertNotIn('Keyspace %s' % quote_name('3.0.0', ks), output)
self.assertIn('has_value_encoding_errors', output)
self.assertIn('undefined_values_table', output)
def test_describe_cluster_output(self):
output_re = r'''(?x)
^
\n
Cluster: [ ] (?P<clustername> .* ) \n
Partitioner: [ ] (?P<partitionername> .* ) \n
Snitch: [ ] (?P<snitchname> .* ) \n
\n
'''
ringinfo_re = r'''
Range[ ]ownership: \n
(
[ ] .*? [ ][ ] \[ ( \d+ \. ){3} \d+ \] \n
)+
\n
'''
with testrun_cqlsh(tty=True, keyspace=None, cqlver=3) as c:
# not in a keyspace
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + '$')
c.send('USE %s;\n' % quote_name('3.0.0', get_test_keyspace()))
c.read_to_next_prompt()
for semicolon in ('', ';'):
output = c.cmd_and_response('describe cluster' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, output_re + ringinfo_re + '$')
def test_describe_schema_output(self):
with testrun_cqlsh(tty=True) as c:
for semicolon in ('', ';'):
output = c.cmd_and_response('desc schema' + semicolon)
self.assertNoHasColors(output)
self.assertRegexpMatches(output, '^\nCREATE KEYSPACE')
self.assertIn("\nCREATE KEYSPACE system WITH replication = {\n 'class': 'LocalStrategy'\n};\n",
output)
self.assertRegexpMatches(output, ';\s*$')
def test_show_output(self):
with testrun_cqlsh(tty=True) as c:
output = c.cmd_and_response('show version;')
self.assertRegexpMatches(output,
'^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Thrift protocol \S+\]$')
output = c.cmd_and_response('show host;')
self.assertHasColors(output)
self.assertRegexpMatches(output, '^Connected to .* at %s:%d\.$'
% (re.escape(TEST_HOST), TEST_PORT))
def test_show_assumptions_output(self):
expected_output = '\nUSE %s;\n\n' % quote_name('', get_test_keyspace())
with testrun_cqlsh(tty=True) as c:
output = c.cmd_and_response('show assumptions')
self.assertEqual(output, 'No overrides.\n')
c.cmd_and_response('assume dynamic_values VALUES aRe uuid;')
expected_output += 'ASSUME dynamic_values VALUES ARE uuid;\n'
output = c.cmd_and_response('show assumptions')
self.assertEqual(output, expected_output + '\n')
c.cmd_and_response('Assume has_all_types names arE float;')
expected_output += 'ASSUME has_all_types NAMES ARE float;\n'
output = c.cmd_and_response('show assumptions')
self.assertEqual(output, expected_output + '\n')
c.cmd_and_response('assume twenty_rows_table ( b ) values are decimal;')
expected_output += 'ASSUME twenty_rows_table(b) VALUES ARE decimal;\n'
output = c.cmd_and_response('show assumptions')
self.assertEqual(output, expected_output + '\n')
def test_eof_prints_newline(self):
with testrun_cqlsh(tty=True) as c:
c.send(CONTROL_D)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, '\n')
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_exit_prints_no_newline(self):
for semicolon in ('', ';'):
with testrun_cqlsh(tty=True) as c:
cmd = 'exit%s\n' % semicolon
c.send(cmd)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, cmd)
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_help_types(self):
with testrun_cqlsh(tty=True) as c:
output = c.cmd_and_response('help types')
def test_help(self):
pass
def test_printing_parse_error(self):
pass
def test_printing_lex_error(self):
pass
def test_multiline_statements(self):
pass
def test_cancel_statement(self):
pass
def test_printing_integrity_error(self):
pass
def test_printing_cql_error(self):
pass
def test_empty_line(self):
pass
| muntasirraihan/apache-cassandra-1.2.4-src | pylib/cqlshlib/test/test_cqlsh_output.py | Python | apache-2.0 | 36,622 |
from __future__ import unicode_literals
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
def assert_delayed(self, obj, num):
"""
Instances with deferred fields look the same as normal instances when
we examine attribute values. Therefore, this method returns the number
of deferred fields on returned instances.
"""
count = len(obj.get_deferred_fields())
self.assertEqual(count, num)
class DeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
def test_defer(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.defer("related__first")[0], 0)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
def test_only(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("related__first")[0], 2)
# Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key; see #15494.
self.assert_delayed(qs.only("pk")[0], 3)
# You can use 'pk' with reverse foreign key lookups.
        # The related_id is always set even if it's not fetched from the DB,
# so pk and related_id are not deferred.
self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 2)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = 'Cannot pass None as an argument to only().'
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
        # Using values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(Primary.objects.defer("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_only_values_does_not_defer(self):
self.assertEqual(Primary.objects.only("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed '
'using select_related at the same time.'
)
with self.assertRaisesMessage(InvalidQuery, msg):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed using '
'select_related at the same time.'
)
with self.assertRaisesMessage(InvalidQuery, msg):
Primary.objects.only("name").select_related("related")[0]
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# select_related() overrides defer().
with self.assertNumQueries(1):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
"p1", "a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# on an inherited model, its PK is also fetched, hence '3' deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
def test_defer_baseclass_when_subclass_has_added_field(self):
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass(self):
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
        # on an inherited model, its PK is also fetched, hence '4' deferred fields.
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
    def test_only_subclass(self):
# You can retrieve a single field on a subclass
obj = BigChild.objects.only("other").get(name="b1")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
def test_defer_inheritance_pk_chaining(self):
"""
When an inherited model is fetched from the DB, its PK is also fetched.
        When getting the PK of the parent model, it is useful to reuse the
        already fetched parent model PK if it happens to be available. This
        test checks that this is done.
"""
s1 = Secondary.objects.create(first="x1", second="y1")
bc = BigChild.objects.create(name="b1", value="foo", related=s1,
other="bar")
bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
def test_eq(self):
s1 = Secondary.objects.create(first="x1", second="y1")
s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
self.assertEqual(s1, s1_defer)
self.assertEqual(s1_defer, s1)
def test_refresh_not_loading_deferred_fields(self):
s = Secondary.objects.create()
rf = Primary.objects.create(name='foo', value='bar', related=s)
rf2 = Primary.objects.only('related', 'value').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
rf2.refresh_from_db()
self.assertEqual(rf2.value, 'new bar')
with self.assertNumQueries(1):
self.assertEqual(rf2.name, 'new foo')
def test_custom_refresh_on_deferred_loading(self):
s = Secondary.objects.create()
rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
rf2 = RefreshPrimaryProxy.objects.only('related').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
# Customized refresh_from_db() reloads all deferred fields on
# access of any of them.
self.assertEqual(rf2.name, 'new foo')
self.assertEqual(rf2.value, 'new bar')
| baylee/django | tests/defer/tests.py | Python | bsd-3-clause | 11,390 |
name="Ada Lovelace"
print(name.title())
| ClicksThroughScience/New-Creations | name.py | Python | mit | 40 |
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as la
import sys
sys.path.append('..')
import uncompiled_floq.helpers as h
import uncompiled_floq.blockmatrix as bm
import uncompiled_floq.fixed_system as fs
import uncompiled_floq.errors as errors
import itertools
import copy
import cmath
def get_u(hf, params):
"""
Calculate the time evolution operator U
given a Fourier transformed Hamiltonian Hf
"""
k = assemble_k(hf, params)
vals, vecs = find_eigensystem(k, params)
phi = calculate_phi(vecs)
psi = calculate_psi(vecs, params)
return calculate_u(phi, psi, vals, params)
def get_u_and_du(hf, dhf, params):
"""
Calculate the time evolution operator U
given a Fourier transformed Hamiltonian Hf,
as well as its derivative dU given dHf
"""
k = assemble_k(hf, params)
vals, vecs = find_eigensystem(k, params)
phi = calculate_phi(vecs)
psi = calculate_psi(vecs, params)
u = calculate_u(phi, psi, vals, params)
dk = assemble_dk(dhf, params)
du = calculate_du(dk, psi, vals, vecs, params)
return [u, du]
def assemble_k(hf, p):
hf_max = (p.nc-1)/2
nz = p.nz
nc = p.nc
dim = p.dim
omega = p.omega
k = np.zeros([p.k_dim, p.k_dim], dtype='complex128')
# Assemble K by placing each component of Hf in turn, which
# for a fixed Fourier index lie on diagonals, with 0 on the
    # main diagonal, components with positive Fourier index below the main
    # diagonal, and components with negative index above it.
    #
    # The first row is therefore essentially Hf(0) Hf(-1) ... Hf(-hf_max) 0 0 0 ...
    # The last row is then ... 0 0 0 Hf(hf_max) ... Hf(0)
    # Note that each main-diagonal block additionally has omega*identity*n added,
    # where n is that block's Fourier index (its row offset from the centre).
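    #
    # For example (illustrative), with nz = 3 and nc = 3 (so hf_max = 1),
    # the loop below produces the block structure
    #
    #     [ Hf(0) - w*I    Hf(-1)         0            ]
    #     [ Hf(1)          Hf(0)          Hf(-1)       ]
    #     [ 0              Hf(1)          Hf(0) + w*I  ]
    #
    # with w = omega and block Fourier indices n = -1, 0, +1 down the diagonal.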
for n in xrange(-hf_max, hf_max+1):
start_row = max(0, n) # if n < 0, start at row 0
start_col = max(0, -n) # if n > 0, start at col 0
stop_row = min((nz-1)+n, nz-1)
stop_col = min((nz-1)-n, nz-1)
row = start_row
col = start_col
current_component = hf[h.n_to_i(n, nc)]
while row <= stop_row and col <= stop_col:
if n == 0:
block = current_component + np.identity(dim)*omega*h.i_to_n(row, nz)
bm.set_block_in_matrix(block, k, dim, nz, row, col)
else:
bm.set_block_in_matrix(current_component, k, dim, nz, row, col)
row += 1
col += 1
return k
def assemble_dk(dhf, p):
p2 = copy.copy(p)
p2.omega = 0.0
return np.array([assemble_k(dhf[i], p2) for i in xrange(0, p.np)])
def find_eigensystem(k, p):
# Find eigenvalues and eigenvectors for k,
# identify the dim unique ones,
# return them in a segmented form
vals, vecs = compute_eigensystem(k, p)
unique_vals = find_unique_vals(vals, p)
vals = vals.round(p.decimals)
indices_unique_vals = [np.where(vals == eva)[0][0] for eva in unique_vals]
unique_vecs = np.array([vecs[:, i] for i in indices_unique_vals])
unique_vecs = separate_components(unique_vecs, p.nz)
return [unique_vals, unique_vecs]
def compute_eigensystem(k, p):
# Find eigenvalues and eigenvectors of k,
# using the method specified in the parameters
if p.sparse:
k = sp.csc_matrix(k)
number_of_eigs = min(2*p.dim, p.k_dim)
vals, vecs = la.eigs(k, k=number_of_eigs, sigma=0.0)
else:
vals, vecs = np.linalg.eig(k)
vals, vecs = trim_eigensystem(vals, vecs, p)
vals = vals.real.astype(np.float64, copy=False)
return vals, vecs
def trim_eigensystem(vals, vecs, p):
# Trim eigenvalues and eigenvectors to only 2*dim ones
# clustered around zero
# Sort eigenvalues and -vectors in increasing order
idx = vals.argsort()
vals = vals[idx]
vecs = vecs[:, idx]
# Only keep values around 0
middle = p.k_dim/2
cutoff_left = max(0, middle - p.dim)
cutoff_right = min(p.k_dim, cutoff_left + 2*p.dim)
cut_vals = vals[cutoff_left:cutoff_right]
cut_vecs = vecs[:, cutoff_left:cutoff_right]
return cut_vals, cut_vecs
def find_unique_vals(vals, p):
# In the list of values supplied, find the set of dim
# e_i that fulfil (e_i - e_j) mod omega != 0 for all i,j,
# and that lie closest to 0.
mod_vals = np.mod(vals, p.omega)
mod_vals = mod_vals.round(decimals=p.decimals) # round to suppress floating point issues
unique_vals = np.unique(mod_vals)
# the unique_vals are ordered and >= 0, but we'd rather have them clustered around 0
should_be_negative = np.where(unique_vals > p.omega/2.)
unique_vals[should_be_negative] = (unique_vals[should_be_negative]-p.omega).round(p.decimals)
if unique_vals.shape[0] != p.dim:
raise errors.EigenvalueNumberError(vals, unique_vals)
else:
return np.sort(unique_vals)
def separate_components(vecs, n):
# Given an array of vectors vecs,
# return an array of each of the vectors split into n sub-arrays
return np.array([np.split(eva, n) for eva in vecs])
def calculate_phi(vecs):
# Given an array of eigenvectors vecs,
# sum over all frequency components in each
return np.array([np.sum(eva, axis=0) for eva in vecs])
def calculate_psi(vecs, p):
# Given an array of eigenvectors vecs,
# sum over all frequency components in each,
    # weighted by exp(+ i omega t n), with n
# being the Fourier index of the component
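    # Sketch of the sum computed below: psi_k = sum_n exp(1j*omega*t*n) * vecs[k][i(n)].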
psi = np.zeros([p.dim, p.dim], dtype='complex128')
for k in xrange(0, p.dim):
partial = np.zeros(p.dim, dtype='complex128')
for i in xrange(0, p.nz):
num = h.i_to_n(i, p.nz)
partial += np.exp(1j*p.omega*p.t*num)*vecs[k][i]
psi[k, :] = partial
return psi
def calculate_u(phi, psi, energies, p):
u = np.zeros([p.dim, p.dim], dtype='complex128')
for k in xrange(0, p.dim):
u += np.exp(-1j*p.t*energies[k])*np.outer(psi[k], np.conj(phi[k]))
return u
def calculate_du(dk, psi, vals, vecs, p):
dim = p.dim
nz_max = p.nz_max
nz = p.nz
npm = p.np
omega = p.omega
t = p.t
vecsstar = np.conj(vecs)
du = np.zeros([npm, dim, dim], dtype='complex128')
# (i1,n1) & (i2,n2) iterate over the full spectrum of k:
# i1, i2: unique eigenvalues/-vectors in 0th Brillouin zone
# n1, n2: related vals/vecs derived by shifting with those offsets (lying in the nth BZs)
uniques = xrange(0, dim)
offsets = xrange(-nz_max, nz_max+1)
for n1 in offsets:
phase = t*np.exp(1j*omega*t*n1)
for n2 in offsets:
for i1, i2 in itertools.product(uniques, uniques):
e1 = vals[i1] + n1*omega
e2 = vals[i2] + n2*omega
v1 = np.roll(vecsstar[i1], n1, axis=0)
v2 = np.roll(vecs[i2], n2, axis=0)
factor = phase*integral_factors(e1, e2, t)
product = np.outer(psi[i1], vecsstar[i2, h.n_to_i(-n2, nz)])
for c in xrange(0, npm):
du[c, :, :] += expectation_value(dk[c], v1, v2)*factor*product
return du
def integral_factors(e1, e2, t):
    # Equals (-i/t) * integral_0^t exp(-1j*e1*s)*exp(-1j*e2*(t-s)) ds;
    # the e1 == e2 branch is the analytic limit of the general expression.
    if e1 == e2:
        return -1.0j*cmath.exp(-1j*t*e1)
    else:
        return (cmath.exp(-1j*t*e1)-cmath.exp(-1j*t*e2))/(t*(e1-e2))
def expectation_value(dk, v1, v2):
a = v1.flatten()
b = v2.flatten()
return np.dot(np.dot(a, dk), b)
| sirmarcel/floq | benchmark/museum_of_evolution/p3/evolution.py | Python | mit | 7,423 |
from functools import wraps
import brewer2mpl
import numpy as np
import matplotlib as mpl
from matplotlib import cm
# Get Set2 from ColorBrewer, a set of colors deemed colorblind-safe and
# pleasant to look at by Drs. Cynthia Brewer and Mark Harrower of Pennsylvania
# State University. These colors look lovely together, and are less
# saturated than those colors in Set1. For more on ColorBrewer, see:
# - Flash-based interactive map:
# http://colorbrewer2.org/
# - A quick visual reference to every ColorBrewer scale:
# http://bl.ocks.org/mbostock/5577023
#class Common(object):
# def __init__(self):
set2 = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
# Another ColorBrewer scale. This one has nice "traditional" colors like
# reds and blues
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
# A colormapcycle for stacked barplots
stackmaps = [brewer2mpl.get_map('YlGn', 'sequential', 8).mpl_colormap,
brewer2mpl.get_map('YlOrRd', 'sequential', 8).mpl_colormap]
# This context-decorator makes it possible to change the color cycle inside
# prettyplotlib without affecting pyplot
class _pretty:
rcParams = {'axes.color_cycle': set2, 'lines.linewidth': .75}
mpl_contexts = []
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
def __enter__(self):
context = mpl.rc_context(rc=self.rcParams)
self.mpl_contexts.append(context)
return context.__enter__()
def __exit__(self, *args):
return self.mpl_contexts.pop().__exit__(*args)
pretty = _pretty()
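# Usage sketch (illustrative; assumes matplotlib.pyplot imports cleanly):
# `pretty` works both as a context manager and as a decorator, applying the
# Set2 color cycle only inside the wrapped code.
def _demo_pretty_usage():
    from matplotlib import pyplot as plt
    with pretty:
        plt.plot([0, 1], [0, 1])   # drawn with the Set2 cycle
    @pretty
    def make_figure():
        plt.plot([0, 1], [1, 0])   # same rc settings for the whole call
    make_figure()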
# This function returns a colorlist for barplots
def getcolors(cmap, yvals, n):
if isinstance(cmap, bool):
cmap = stackmaps[n%len(stackmaps)]
return [cmap( abs(int((float(yval)/np.max(yvals))*cmap.N) )) for yval in yvals]
# Set some commonly used colors
almost_black = '#262626'
light_grey = np.array([float(248) / float(255)] * 3)
reds = cm.Reds
reds.set_bad('white')
reds.set_under('white')
blues_r = cm.Blues_r
blues_r.set_bad('white')
blues_r.set_under('white')
# Need to 'reverse' red to blue so that blue=cold=small numbers,
# and red=hot=large numbers with '_r' suffix
blue_red = brewer2mpl.get_map('RdBu', 'Diverging', 11,
reverse=True).mpl_colormap
| olgabot/prettyplotlib | prettyplotlib/colors.py | Python | mit | 2,394 |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for analyzer
"""
import json
import TestGyp
found = 'Found dependency'
found_all = 'Found dependency (all)'
not_found = 'No dependencies'
def _CreateConfigFile(files, additional_compile_targets, test_targets=[]):
"""Creates the analyzer config file, which is used as the input to analyzer.
See description of analyzer.py for description of the arguments."""
f = open('test_file', 'w')
to_write = {'files': files,
'test_targets': test_targets,
'additional_compile_targets': additional_compile_targets }
json.dump(to_write, f)
f.close()
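# For reference (illustrative values): a file written by _CreateConfigFile
# contains JSON such as
#   {"files": ["foo.c"], "test_targets": [], "additional_compile_targets": ["all"]}
# which is exactly the input format analyzer.py parses.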
def _CreateBogusConfigFile():
f = open('test_file','w')
f.write('bogus')
f.close()
def _ReadOutputFileContents():
f = open('analyzer_output', 'r')
result = json.load(f)
f.close()
return result
# NOTE: this would be clearer if it subclassed TestGypCustom, but that trips
# over a bug in pylint (E1002).
test = TestGyp.TestGypCustom(format='analyzer')
def CommonArgs():
return ('-Gconfig_path=test_file',
'-Ganalyzer_output_path=analyzer_output')
def run_analyzer(*args, **kw):
"""Runs the test specifying a particular config and output path."""
args += CommonArgs()
test.run_gyp('test.gyp', *args, **kw)
def run_analyzer2(*args, **kw):
"""Same as run_analyzer(), but passes in test2.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test2.gyp', *args, **kw)
def run_analyzer3(*args, **kw):
"""Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test3.gyp', *args, **kw)
def run_analyzer4(*args, **kw):
"""Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
args += CommonArgs()
test.run_gyp('test4.gyp', *args, **kw)
def EnsureContains(matched=False, compile_targets=set(), test_targets=set()):
"""Verifies output contains |compile_targets|."""
result = _ReadOutputFileContents()
if result.get('error', None):
print 'unexpected error', result.get('error')
test.fail_test()
if result.get('invalid_targets', None):
print 'unexpected invalid_targets', result.get('invalid_targets')
test.fail_test()
actual_compile_targets = set(result['compile_targets'])
if actual_compile_targets != compile_targets:
print 'actual compile_targets:', actual_compile_targets, \
'\nexpected compile_targets:', compile_targets
test.fail_test()
actual_test_targets = set(result['test_targets'])
if actual_test_targets != test_targets:
print 'actual test_targets:', actual_test_targets, \
'\nexpected test_targets:', test_targets
test.fail_test()
if matched and result['status'] != found:
print 'expected', found, 'got', result['status']
test.fail_test()
elif not matched and result['status'] != not_found:
print 'expected', not_found, 'got', result['status']
test.fail_test()
def EnsureMatchedAll(compile_targets, test_targets=set()):
result = _ReadOutputFileContents()
if result.get('error', None):
print 'unexpected error', result.get('error')
test.fail_test()
if result.get('invalid_targets', None):
print 'unexpected invalid_targets', result.get('invalid_targets')
test.fail_test()
if result['status'] != found_all:
print 'expected', found_all, 'got', result['status']
test.fail_test()
actual_compile_targets = set(result['compile_targets'])
if actual_compile_targets != compile_targets:
    print 'actual compile_targets:', actual_compile_targets, \
        '\nexpected compile_targets:', compile_targets
test.fail_test()
actual_test_targets = set(result['test_targets'])
if actual_test_targets != test_targets:
    print 'actual test_targets:', actual_test_targets, \
        '\nexpected test_targets:', test_targets
test.fail_test()
def EnsureError(expected_error_string):
"""Verifies output contains the error string."""
result = _ReadOutputFileContents()
if result.get('error', '').find(expected_error_string) == -1:
print 'actual error:', result.get('error', ''), '\nexpected error:', \
expected_error_string
test.fail_test()
def EnsureStdoutContains(expected_error_string):
if test.stdout().find(expected_error_string) == -1:
print 'actual stdout:', test.stdout(), '\nexpected stdout:', \
expected_error_string
test.fail_test()
def EnsureInvalidTargets(expected_invalid_targets):
"""Verifies output contains invalid_targets."""
result = _ReadOutputFileContents()
actual_invalid_targets = set(result['invalid_targets'])
if actual_invalid_targets != expected_invalid_targets:
print 'actual invalid_targets:', actual_invalid_targets, \
'\nexpected :', expected_invalid_targets
test.fail_test()
# Two targets, A and B (both static_libraries) and A depends upon B. If a file
# in B changes, then both A and B are output. It is not strictly necessary that
# A is compiled in this case, only B.
_CreateConfigFile(['b.c'], ['all'])
test.run_gyp('static_library_test.gyp', *CommonArgs())
EnsureContains(matched=True, compile_targets={'a', 'b'})
# Verifies config_path must be specified.
test.run_gyp('test.gyp')
EnsureStdoutContains('Must specify files to analyze via config_path')
# Verifies config_path must point to a valid file.
test.run_gyp('test.gyp', '-Gconfig_path=bogus_file',
'-Ganalyzer_output_path=analyzer_output')
EnsureError('Unable to open file bogus_file')
# Verify 'invalid_targets' is present when bad target is specified.
_CreateConfigFile(['exe2.c'], ['bad_target'])
run_analyzer()
EnsureInvalidTargets({'bad_target'})
# Verifies config_path must point to a valid json file.
_CreateBogusConfigFile()
run_analyzer()
EnsureError('Unable to parse config file test_file')
# Trivial test of a source.
_CreateConfigFile(['foo.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Conditional source that is excluded.
_CreateConfigFile(['conditional_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Conditional source that is included by way of argument.
_CreateConfigFile(['conditional_source.c'], ['all'])
run_analyzer('-Dtest_variable=1')
EnsureContains(matched=True, compile_targets={'exe'})
# Two unknown files.
_CreateConfigFile(['unknown1.c', 'unoknow2.cc'], ['all'])
run_analyzer()
EnsureContains()
# Two unknown files.
_CreateConfigFile(['unknown1.c', 'subdir/subdir_sourcex.c'], ['all'])
run_analyzer()
EnsureContains()
# Included dependency
_CreateConfigFile(['unknown1.c', 'subdir/subdir_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
# Included inputs to actions.
_CreateConfigFile(['action_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Don't consider outputs.
_CreateConfigFile(['action_output.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Rule inputs.
_CreateConfigFile(['rule_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Ignore path specified with PRODUCT_DIR.
_CreateConfigFile(['product_dir_input.c'], ['all'])
run_analyzer()
EnsureContains(matched=False)
# Path specified via a variable.
_CreateConfigFile(['subdir/subdir_source2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Verifies paths with // are fixed up correctly.
_CreateConfigFile(['parent_source.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
# Verifies relative paths are resolved correctly.
_CreateConfigFile(['subdir/subdir_source.h'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Verifies relative paths in inputs are resolved correctly.
_CreateConfigFile(['rel_path1.h'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Various permutations when passing in targets.
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'],
['all'], ['exe', 'exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe3'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'], ['exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
# Verifies duplicates are ignored.
_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'],
['exe', 'exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c'], ['all'], ['exe'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['subdir/subdir2b_source.c', 'exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['subdir/subdir2b_source.c'], ['all'], ['exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe3'}, compile_targets={'exe3'})
_CreateConfigFile(['exe2.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'})
_CreateConfigFile(['foo.c'], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe'})
# Assertions when modifying build (gyp/gypi) files, especially when said files
# are included.
_CreateConfigFile(['subdir2/d.cc'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['subdir2/subdir.includes.gypi'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['subdir2/subdir.gyp'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'foo'},
compile_targets={'exe'})
_CreateConfigFile(['test2.includes.gypi'], ['all'],
['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2()
EnsureContains(matched=True, test_targets={'exe', 'exe2', 'exe3'},
compile_targets={'exe', 'exe2', 'exe3'})
# Verify modifying a file included makes all targets dirty.
_CreateConfigFile(['common.gypi'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
run_analyzer2('-Icommon.gypi')
EnsureMatchedAll({'all', 'exe', 'exe2', 'foo', 'exe3'},
{'exe', 'exe2', 'foo', 'exe3'})
# Assertions from test3.gyp.
_CreateConfigFile(['d.c', 'f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b'})
_CreateConfigFile(['c.c', 'e.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'e'})
_CreateConfigFile(['d.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['d.c'], ['all'], ['a', 'b'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a', 'b'},
compile_targets={'a', 'b'})
_CreateConfigFile(['f.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
_CreateConfigFile(['a.c'], ['all'], ['a'])
run_analyzer3()
EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
_CreateConfigFile(['a.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a'})
_CreateConfigFile(['d.c'], ['all'])
run_analyzer3()
EnsureContains(matched=True, compile_targets={'a', 'b'})
# Assertions around test4.gyp.
_CreateConfigFile(['f.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'e', 'f'})
_CreateConfigFile(['d.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'd'})
_CreateConfigFile(['i.c'], ['all'])
run_analyzer4()
EnsureContains(matched=True, compile_targets={'h', 'i'})
# Assertions where 'all' is not supplied in compile_targets.
_CreateConfigFile(['exe2.c'], [], ['exe2'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2'}, compile_targets={'exe2'})
_CreateConfigFile(['exe20.c'], [], ['exe2'])
run_analyzer()
EnsureContains(matched=False)
_CreateConfigFile(['exe2.c', 'exe3.c'], [], ['exe2', 'exe3'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2', 'exe3'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe2.c', 'exe3.c'], ['exe3'], ['exe2'])
run_analyzer()
EnsureContains(matched=True, test_targets={'exe2'},
compile_targets={'exe2', 'exe3'})
_CreateConfigFile(['exe3.c'], ['exe2'], ['exe2'])
run_analyzer()
EnsureContains(matched=False)
# Assertions with 'all' listed as a test_target.
_CreateConfigFile(['exe3.c'], [], ['all'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe3'}, test_targets={'all'})
_CreateConfigFile(['exe2.c'], [], ['all', 'exe2'])
run_analyzer()
EnsureContains(matched=True, compile_targets={'exe2'},
test_targets={'all', 'exe2'})
test.pass_test()
| pnigos/gyp | test/analyzer/gyptest-analyzer.py | Python | bsd-3-clause | 13,761 |
# Copyright (c) 2015 OpenStack Foundation
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from requests_mock.contrib import fixture as rm_fixture
from glanceclient import client
from glanceclient.tests.unit.v2.fixtures import image_create_fixture
from glanceclient.tests.unit.v2.fixtures import image_list_fixture
from glanceclient.tests.unit.v2.fixtures import image_show_fixture
from glanceclient.tests.unit.v2.fixtures import schema_fixture
from glanceclient.tests import utils as testutils
from glanceclient.v2.image_schema import _BASE_SCHEMA
class ClientTestRequests(testutils.TestCase):
"""Client tests using the requests mock library."""
def test_list_bad_image_schema(self):
# if kernel_id or ramdisk_id are not uuids, verify we can
# still perform an image listing. Regression test for bug
# 1477910
self.requests = self.useFixture(rm_fixture.Fixture())
self.requests.get('http://example.com/v2/schemas/image',
json=schema_fixture)
self.requests.get('http://example.com/v2/images?limit=20',
json=image_list_fixture)
gc = client.Client(2.2, "http://example.com/v2.1")
images = gc.images.list()
for image in images:
pass
def test_show_bad_image_schema(self):
# if kernel_id or ramdisk_id are not uuids, verify we
# don't fail due to schema validation
self.requests = self.useFixture(rm_fixture.Fixture())
self.requests.get('http://example.com/v2/schemas/image',
json=schema_fixture)
self.requests.get('http://example.com/v2/images/%s'
% image_show_fixture['id'],
json=image_show_fixture)
gc = client.Client(2.2, "http://example.com/v2.1")
img = gc.images.get(image_show_fixture['id'])
self.assertEqual(image_show_fixture['checksum'], img['checksum'])
def test_invalid_disk_format(self):
self.requests = self.useFixture(rm_fixture.Fixture())
self.requests.get('http://example.com/v2/schemas/image',
json=_BASE_SCHEMA)
self.requests.post('http://example.com/v2/images',
json=image_create_fixture)
self.requests.get('http://example.com/v2/images/%s'
% image_show_fixture['id'],
json=image_show_fixture)
gc = client.Client(2.2, "http://example.com/v2.1")
fields = {"disk_format": "qbull2"}
try:
gc.images.create(**fields)
self.fail("Failed to raise exception when using bad disk format")
except TypeError:
pass
def test_valid_disk_format(self):
self.requests = self.useFixture(rm_fixture.Fixture())
self.requests.get('http://example.com/v2/schemas/image',
json=_BASE_SCHEMA)
self.requests.post('http://example.com/v2/images',
json=image_create_fixture)
self.requests.get('http://example.com/v2/images/%s'
% image_show_fixture['id'],
json=image_show_fixture)
gc = client.Client(2.2, "http://example.com/v2.1")
fields = {"disk_format": "vhdx"}
gc.images.create(**fields)
| openstack/python-glanceclient | glanceclient/tests/unit/v2/test_client_requests.py | Python | apache-2.0 | 3,930 |
# coding: utf-8
from . import pos_order
| grap/odoo-addons-it | intercompany_trade_point_of_sale/models/__init__.py | Python | agpl-3.0 | 40 |
import numpy as np
from PIL import Image
data = np.fromfile('data/float32.dat', dtype=np.float32)
data = data.reshape((360,720))
Image.fromarray(data*10**7).show()
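# Note: a float32 array becomes a mode-'F' PIL image; the ad-hoc 1e7 factor
# rescales small field values into a visible range (an assumption about the
# data's magnitude, not a fixed constant).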
| jsheedy/velotronheavyindustries.com | intro-to-d3-grid-map/bin/load_data.py | Python | mit | 165 |
import os
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
def get_env_variable(var_name):
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the {} environment variable".format(var_name)
raise ImproperlyConfigured(error_msg)
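# Example: SECRET_KEY below raises ImproperlyConfigured with a readable
# message when the variable is unset, instead of an opaque KeyError.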
BASE_DIR = Path(__file__).ancestor(3)
SECRET_KEY = get_env_variable("SECRET_KEY")
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Hosts/domain names this site can serve; extend per deployment environment
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'contact_form',
'tweets',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.child("templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Phoenix'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
BASE_DIR.child("static"),
)
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR.child("media")
# Twitter settings
TWITTER_APP_KEY = get_env_variable("TWITTER_APP_KEY")
TWITTER_APP_SECRET = get_env_variable("TWITTER_APP_SECRET")
TWITTER_OAUTH_TOKEN = get_env_variable("TWITTER_OAUTH_TOKEN")
TWITTER_OAUTH_TOKEN_SECRET = get_env_variable("TWITTER_OAUTH_TOKEN_SECRET")
# Celery settings
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json' | shazadan/mood-map | config/settings/base.py | Python | gpl-2.0 | 2,641 |
from setuptools import find_packages, setup
VERSION = "4.0.0"
LONG_DESCRIPTION = """
.. image:: http://pinaxproject.com/pinax-design/patches/pinax-announcements.svg
:target: https://pypi.python.org/pypi/pinax-announcements/
===================
Pinax Announcements
===================
.. image:: https://img.shields.io/pypi/v/pinax-announcements.svg
:target: https://pypi.python.org/pypi/pinax-announcements/
\
.. image:: https://img.shields.io/circleci/project/github/pinax/pinax-announcements.svg
:target: https://circleci.com/gh/pinax/pinax-announcements
.. image:: https://img.shields.io/codecov/c/github/pinax/pinax-announcements.svg
:target: https://codecov.io/gh/pinax/pinax-announcements
.. image:: https://img.shields.io/github/contributors/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/graphs/contributors
.. image:: https://img.shields.io/github/issues-pr/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/pulls
.. image:: https://img.shields.io/github/issues-pr-closed/pinax/pinax-announcements.svg
:target: https://github.com/pinax/pinax-announcements/pulls?q=is%3Apr+is%3Aclosed
\
.. image:: http://slack.pinaxproject.com/badge.svg
:target: http://slack.pinaxproject.com/
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://opensource.org/licenses/MIT/
\
``pinax-announcements`` is a well tested, documented, and proven solution
for any site wanting announcements for its users.
Announcements have title and content, with options for filtering their display:
* ``site_wide`` - True or False
* ``members_only`` - True or False
* ``publish_start`` - date/time or none
* ``publish_end`` - date/time or none
``pinax-announcements`` has three options for dismissing an announcement:
* ``DISMISSAL_NO`` - always visible
* ``DISMISSAL_SESSION`` - dismiss for the session
* ``DISMISSAL_PERMANENT`` - dismiss forever
Supported Django and Python Versions
------------------------------------
+-----------------+-----+-----+-----+
| Django / Python | 3.6 | 3.7 | 3.8 |
+=================+=====+=====+=====+
| 2.2 | * | * | * |
+-----------------+-----+-----+-----+
| 3.0 | * | * | * |
+-----------------+-----+-----+-----+
"""
setup(
author="Pinax Team",
author_email="[email protected]",
description="a Django announcements app",
name="pinax-announcements",
long_description=LONG_DESCRIPTION,
version=VERSION,
url="http://github.com/pinax/pinax-announcements/",
license="MIT",
packages=find_packages(),
package_data={
"announcements": []
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=[
"django>=2.2",
],
tests_require=[
"django-test-plus>=1.0.22",
"pinax-templates>=1.0.4",
"mock>=2.0.0",
],
test_suite="runtests.runtests",
zip_safe=False
)
| pinax/django-announcements | setup.py | Python | mit | 3,593 |
# -*- coding: utf-8 -*-
import sys, re, subprocess, os
from os.path import join as pjoin
from pyLibQMC import parseCML, paths, misc
try:
import DCVizWrapper as viz
canViz = True
except:
print "Displaying results not available: No DCViz installation found."
canViz = False
def initRun(n_p, path, name, N, bin_edge, n_cores, mpiFlag, openGUI):
exe = pjoin(paths.programPath, misc.QMC2programName)
mpi = ""
if mpiFlag:
mpi = "mpiexec -n %d" % n_cores
args = mpi.split() + [exe, "redist", n_p, path, name, N, bin_edge]
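    # e.g. with mpiFlag set this runs (paths assumed for illustration):
    #   mpiexec -n 4 /path/to/QMC2 redist <n_p> <path> <name> <N> <bin_edge>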
subprocess.call(args)
ending = "arma"
if "Diatom" in name or "QDots3D" in name or "Atoms" in name:
ending = "arma3D"
outPath = "%swalker_positions/__type___out_%s_edge%s.__end__" % (path, name, bin_edge)
dist_path = outPath.replace("__type__", "dist").replace("__end__", ending)
radial_path = outPath.replace("__type__", "radial").replace("__end__", "arma")
if not os.path.exists(dist_path):
print dist_path
outPath = outPath.replace("_xy", "")
dist_path = outPath.replace("__type__", "dist")
radial_path = outPath.replace("__type__", "radial")
if not os.path.exists(dist_path):
print dist_path
print "something went wrong..."
if canViz and openGUI:
if ending != "arma3D": viz.main(dist_path, False)
viz.main(radial_path, False)
def main():
stdoutToFile, mpiFlag, openGUI, n_cores = parseCML(sys.argv)
print "MPI Nodes: ", n_cores
#path N binEdge
    if len(sys.argv) < 4 or len(sys.argv) > 6:
        print "Error in command line arguments. [rawdata, N, bin edges ...]"
sys.exit(1)
rawfile = sys.argv[1]
if 'rawdata' not in rawfile:
print "Selected file is not a distribution data file."
sys.exit(1)
N = sys.argv[2]
bin_edge = sys.argv[3]
name = re.findall("dist_rawdata_(.+).arma", rawfile)[0]
n_p = re.findall("(\d+)c\d", name)[0]
path = rawfile.split("walker_positions")[0]
initRun(n_p, path, name, N, bin_edge, n_cores, mpiFlag, openGUI)
if __name__ == "__main__":
main()
| jorgehog/QMC2 | tools/reCalcDist.py | Python | gpl-3.0 | 2,222 |
# -*- coding: utf-8 -*-
import sys
import os
dirname = os.path.dirname(__file__)
class ChallengeAlreadyExist(Exception): pass
if __name__ == '__main__':
if len(sys.argv) == 2:
ch_name = sys.argv[1]
newdir = os.path.join(dirname, ch_name)
if not os.path.exists(newdir):
os.makedirs(newdir)
else:
raise ChallengeAlreadyExist("{} already exists".format(ch_name))
ch_initpy = open(newdir + '/__init__.py', mode='wt')
ch_initpy.write('''
# -*- coding: utf-8 -*-
from flask import Blueprint
{ch_name}_blueprint = Blueprint('{ch_name}_blueprint', __name__)
from . import {ch_name}'''.format(ch_name=ch_name))
ch_initpy.close()
ch_file = open(newdir + '/{ch_name}.py'.format(ch_name=ch_name), mode='wt')
ch_file.write('''
# -*- coding: utf-8 -*-
from flask import render_template
from . import {ch_name}_blueprint
@{ch_name}_blueprint.route('/{ch_name}')
def {ch_name}_view():
return render_template('{ch_name}.html')'''.format(ch_name=ch_name))
ch_file.close()
app_initpy = open(dirname + '/__init__.py', mode='at')
app_initpy.write('''
from .{ch_name} import {ch_name}_blueprint
app.register_blueprint({ch_name}_blueprint)'''.format(ch_name=ch_name))
app_initpy.close()
template = open(dirname + '/templates/{ch_name}.html'.format(ch_name=ch_name), mode='wt')
template.write(open(dirname + '/templates/ch_0x00.html', mode='rt').read())
template.close()
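        # Usage sketch (hypothetical name): `python newchallenge.py ch_0x01`
        # creates the package, registers its blueprint in the app __init__.py
        # and copies the ch_0x00 template as its starting page.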
| NQysit/pybwap | pybwap/newchallenge.py | Python | mit | 1,519 |
#!/usr/bin/python3
from __future__ import print_function
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import sys
import scipy
from tqdm import tqdm
import django
from django.utils import timezone
from django.db.models import Count
django.setup()
from papers.models import Article, Feature
import papers.utils as utils
def get_articles_without_features():
articles = Article.objects.annotate(num_children=Count('feature')).filter(num_children=0)
data = []
for art in articles:
data.append( (art.title, art.authors, art.abstract, art.keywords) )
return articles, data
def get_features(data):
features = scipy.sparse.csr_matrix( utils.compute_features( data ) )
return features
def add_features_to_db(articles, features, stride=200):
for k in tqdm(range(len(articles)//stride+1)):
start = int(k*stride)
end = min((k+1)*stride,features.shape[0])
A = scipy.sparse.coo_matrix(features[start:end])
Feature.objects.bulk_create( [ Feature( article=articles[int(i+k*stride)], index=int(j), value=float(v)) for i,j,v in zip(A.row, A.col, A.data) ] )
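# Note: converting each stride of rows to COO keeps memory bounded; every
# bulk_create stores only the nonzero (article, index, value) triples of
# that chunk instead of materialising the dense feature matrix.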
if __name__ == "__main__":
# Feature.objects.all().delete()
print("Finding articles without features...")
articles, data = get_articles_without_features()
if not len(data):
sys.exit()
print("Computing features for %i articles..."%(len(data)))
features = get_features(data)
print("Adding %i feature vectors to db... "%(features.shape[0]))
add_features_to_db(articles, features)
| fzenke/morla | scripts/compute_feature_vectors.py | Python | mit | 1,674 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_imperial_army_captain_hard')
mobileTemplate.setLevel(60)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(0)
mobileTemplate.setStalker(True)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_stormtrooper_captain_black_black.iff')
templates.add('object/mobile/shared_dressed_stormtrooper_captain_black_gold.iff')
templates.add('object/mobile/shared_dressed_stormtrooper_captain_white_white.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('crackdown_stormtrooper_captain_hard', mobileTemplate)
return | agry/NGECore2 | scripts/mobiles/generic/faction/imperial/crackdown_stormtrooper_captain.py | Python | lgpl-3.0 | 1,634 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-07 17:05:11
import os
import json
import time
import logging
import itertools
from six.moves import queue as Queue
from collections import deque
from six import iteritems, itervalues
from pyspider.libs import counter, utils
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
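    # A task's optional 'schedule' dict overrides these defaults: priority
    # (higher runs first), retries, exetime (unix time before which the task
    # waits), age (seconds before a crawled task may be restarted; -1 means
    # never) and itag (an arbitrary change marker; a new itag forces restart).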
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self.task_queue = dict()
self._last_tick = int(time.time())
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = {}
self.projects[project['name']].update(project)
self.projects[project['name']]['md5sum'] = utils.md5string(project['script'])
if not self.projects[project['name']].get('active_tasks', None):
self.projects[project['name']]['active_tasks'] = deque(maxlen=self.ACTIVE_TASKS)
# load task queue when project is running and delete task_queue when project is stoped
if project['status'] in ('RUNNING', 'DEBUG'):
if project['name'] not in self.task_queue:
self._load_tasks(project['name'])
self.task_queue[project['name']].rate = project['rate']
self.task_queue[project['name']].burst = project['burst']
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
self.on_select_task({
'taskid': '_on_get_info',
'project': project['name'],
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': ['min_tick', ],
},
'process': {
'callback': '_on_get_info',
},
})
else:
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
self.task_queue[project] = TaskQueue(rate=0, burst=0)
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
self.task_queue[project].put(taskid, priority, exetime)
logger.debug('project: %s loaded %d tasks.', project, len(self.task_queue[project]))
if self.projects[project]['status'] in ('RUNNING', 'DEBUG'):
self.task_queue[project].rate = self.projects[project]['rate']
self.task_queue[project].burst = self.projects[project]['burst']
else:
self.task_queue[project].rate = 0
self.task_queue[project].burst = 0
if project not in self._cnt['all']:
status_count = self.taskdb.status_count(project)
self._cnt['all'].value(
(project, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value((project, 'pending'), len(self.task_queue[project]))
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
or project in not in task_queue
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.task_queue:
logger.error('unknown project: %s', task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.task_queue[task['project']].put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
        the out queue may have a size limit to prevent blocking; overflow goes to a send_buffer
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
self.projects[task['project']].update(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.task_queue[task['project']]:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
if self.INQUEUE_LIMIT and len(self.task_queue[task['project']]) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
continue
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
task = self.on_old_request(task, oldtask)
else:
task = self.on_new_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if project['status'] not in ('DEBUG', 'RUNNING'):
continue
if project.get('min_tick', 0) == 0:
continue
if self._last_tick % int(project['min_tick']) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project['name'],
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
# use force=False here to prevent automatic send_buffer append and get exception
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project, task_queue in iteritems(self.task_queue):
if cnt >= limit:
break
# task queue
self.task_queue[project].check_update()
project_cnt = 0
            # check send_buffer here: when it is not empty, out_queue may be blocked, so don't send more tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project, taskid))
project_cnt += 1
cnt += 1
cnt_dict[project] = project_cnt
for project, taskid in taskids:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
if not task:
continue
task = self.on_select_task(task)
return cnt_dict
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project['status'] != 'STOP':
continue
if now - project['updatetime'] < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project['group']):
continue
logger.warning("deleting project: %s!", project['name'])
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
del self.projects[project['name']]
self.taskdb.drop(project['name'])
self.projectdb.drop(project['name'])
if self.resultdb:
self.resultdb.drop(project['name'])
def __len__(self):
return sum(len(x) for x in itervalues(self.task_queue))
def quit(self):
'''Set quit signal'''
self._quit = True
def run_once(self):
        '''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("loading projects")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
try:
from six.moves.xmlrpc_server import SimpleXMLRPCServer
except ImportError:
from SimpleXMLRPCServer import SimpleXMLRPCServer
server = SimpleXMLRPCServer((bind, port), allow_none=True, logRequests=logRequests)
server.register_introspection_functions()
server.register_multicall_functions()
server.register_function(self.quit, '_quit')
server.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
server.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
server.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
server.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
server.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x['active_tasks']) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(tasks)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
server.register_function(get_active_tasks, 'get_active_tasks')
server.timeout = 0.5
while not self._quit:
server.handle_request()
server.server_close()
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.task_queue[task['project']].done(task['taskid']):
                logger.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
                logger.error('unknown status pack: %s', task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
if retried == 0:
next_exetime = 0
elif retried == 1:
next_exetime = 1 * 60 * 60
else:
next_exetime = 6 * (2 ** retried) * 60 * 60
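        # Backoff for reference: the first retry fires immediately, the next
        # after 1 hour, then 24h, 48h, 96h, ... (6 * 2**retried hours).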
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
elif retried >= retries:
next_exetime = -1
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
# inject informations about project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['group'] = project_info.get('group')
task['project_md5sum'] = project_info.get('md5sum')
task['project_updatetime'] = project_info.get('updatetime', 0)
project_info['active_tasks'].appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler Mixin class for one mode
overwirted send_task method
call processor.on_task(fetcher.fetch(task)) instead of consuming queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
raise LookupError('You need specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
shell.interact(
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if not is_crawled:
self.ioloop.stop()
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
# do with message
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# do with results
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
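# Usage sketch (illustrative only; the constructor arguments and the
# fetcher/processor wiring below are assumptions based on pyspider's
# `one` run mode, not taken from this file):
#
#   scheduler = OneScheduler(taskdb=taskdb, projectdb=projectdb,
#                            newtask_queue=newtask_queue,
#                            status_queue=status_queue, out_queue=None)
#   scheduler.init_one(ioloop=tornado.ioloop.IOLoop.current(),
#                      fetcher=fetcher, processor=processor,
#                      interactive=True)
#   scheduler.run()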
| scrollpointclick/pyspider | pyspider/scheduler/scheduler.py | Python | apache-2.0 | 33,344 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 University of Dundee & Open Microscopy Environment.
All Rights Reserved.
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
"""
def testInterpolateSetting(gatewaywrapper):
"""
Tests conn.getInterpolateSetting(), setting the value as Admin and
testing the output as a regular user.
"""
gatewaywrapper.loginAsAdmin()
configService = gatewaywrapper.gateway.getConfigService()
try:
configService.setConfigValue(
"omero.client.viewer.interpolate_pixels", "false")
gatewaywrapper.loginAsAuthor()
assert not gatewaywrapper.gateway.getInterpolateSetting()
finally:
# try/finally to make sure that we
# set back to 'true' (default value) to clean up
gatewaywrapper.loginAsAdmin()
configService = gatewaywrapper.gateway.getConfigService()
configService.setConfigValue(
"omero.client.viewer.interpolate_pixels", "true")
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getInterpolateSetting()
| joansmith/openmicroscopy | components/tools/OmeroPy/test/integration/gatewaytest/test_config_service.py | Python | gpl-2.0 | 1,266 |
"""
This bootstrap module should be used to setup parts of the admin plugin
that need to exist before all controllers are loaded. It is best used to
define/register hooks, setup namespaces, and the like.
"""
from pkg_resources import get_distribution
from cement.core.namespace import CementNamespace, register_namespace
VERSION = get_distribution('iustools.admin').version
# Setup the 'admin' namespace object
admin = CementNamespace(
label='admin',
description='Admin Plugin for Iustools',
version=VERSION,
controller='AdminController',
provider='iustools'
)
# Add a config option to the admin namespace. This is effectively the
# default setting for the config option. Overridden by config files, and then
# cli options.
admin.config['managed_tags'] = ['testing', 'stable', 'dev']
admin.config['managed_releases'] = ['el5', 'el5.6.z', 'el6', 'el6.0.z', 'el6.1.z', 'el6.2.z']
admin.config['managed_archs'] = ['i386', 'x86_64']
admin.config['repo_base_path'] = '~/ius-repo'
admin.config['remote_rsync_path'] = '~/ius-repo'
admin.config['remote_exclude'] = "[0-9]\.[0-9]"
admin.config['internal_remote_rsync_path'] = False
admin.config['internal_remote_exclude'] = False
admin.config['rpm_binpath'] = '/bin/rpm'
admin.config['rsync_binpath'] = '/usr/bin/rsync'
admin.config['createrepo_binpath'] = '/usr/bin/createrepo'
admin.config['createrepo_opts'] = '--database --checksum md5 --simple-md-filenames'
admin.config['yumarch_binpath'] = '/usr/bin/yum-arch'
admin.config['repoview_binpath'] = '/usr/bin/repoview'
admin.config['gpg_passphrase'] = ''
admin.config['announce_email'] = '[email protected]'
admin.config['smtp_from'] = '[email protected]'
admin.config['smtp_host'] = 'localhost'
admin.config['smtp_port'] = 25
admin.config['smtp_user'] = ''
admin.config['smtp_password'] = ''
admin.config['smtp_tls'] = False
admin.config['smtp_keyfile'] = '/etc/pki/tls/private/localhost.key'
admin.config['smtp_certfile'] = '/etc/pki/tls/certs/localhost.crt'
admin.config['smtp_subject_prefix'] = '[ius] '
admin.config['gpg_key_file_path'] = '/usr/share/ius-tools/IUS-COMMUNITY-GPG-KEY'
admin.config['eua_file_path'] = '/usr/share/ius-tools/IUS-COMMUNITY-EUA'
# Add a cli option to the admin namespace. This overrides the
# corresponding config option if passed
admin.options.add_option('--tag', action='store', dest='tag_label',
help='tag label')
admin.options.add_option('--sign', action='store_true', dest='sign',
help='sign stable packages')
admin.options.add_option('--clean', action='store_true', dest='clean',
help='clean destination repo before creation (used with gen-repo)')
admin.options.add_option('--delete', action='store_true', dest='delete',
help='delete old files from remote destination')
admin.options.add_option('--passphrase', action='store', metavar='STR',
dest='gpg_passphrase', help='gpg key passphrase')
# Officialize and register the namespace
register_namespace(admin)
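# Precedence sketch (illustrative): the admin.config values above act as
# defaults, overridden first by config files and then by the CLI options
# registered above. For example (the subcommand name is hypothetical):
#
#   $ ius-tools admin gen-repo --tag=testing --sign --clean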
| iuscommunity/ius-tools | src/iustools.admin/iustools/bootstrap/admin.py | Python | gpl-2.0 | 2,964 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if context.in_graph_mode():
      # updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes()
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if context.in_graph_mode():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if context.in_graph_mode():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
# regularizers only supported in GRAPH mode.
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
variable = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
def testGetVariable(self):
with self.test_session():
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
def call(self, inputs):
return inputs * 2
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
layer.apply(inputs)
layer.apply(inputs)
self.assertEqual([v.name for v in layer.variables],
['my_layer/my_var:0'])
# Creating a layer with no scope leads to lazy construction of
# the scope at apply() time. It uses scope "<current scope>/base_name"
lazy_layer = MyLayer(_reuse=True)
with variable_scope.variable_scope('new_scope'):
with variable_scope.variable_scope('my_layer'):
variable_scope.get_variable('my_var', [2, 2])
# Smoke test: it runs.
lazy_layer.apply(inputs)
# The variables were created outside of the Layer, and
# reuse=True, so the Layer does not own them and they are not
# stored in its collection.
self.assertEqual(lazy_layer.variables, [])
self.assertEqual(lazy_layer._scope.name, 'new_scope/my_layer')
# Creating a layer with no scope leads to lazy construction of
# the scope at apply() time. If 'scope' argument is passed to
# apply(), it uses that scope when accessing variables.
lazy_layer = MyLayer(_reuse=True)
with variable_scope.variable_scope('new_scope') as new_scope:
variable_scope.get_variable('my_var', [2, 2])
# Smoke test: it runs.
lazy_layer.apply(inputs, scope=new_scope)
# The variables were created outside of the Layer, and
# reuse=True, so the Layer does not own them and they are not
# stored in its collection.
self.assertEqual(lazy_layer.variables, [])
self.assertEqual(lazy_layer._scope.name, 'new_scope')
# Checking for graph equality is only done in GRAPH mode.
with ops.Graph().as_default():
inputs_ng = random_ops.random_uniform((5,), seed=1)
with self.assertRaisesRegexp(ValueError, r'graph are not the same'):
layer.apply(inputs_ng)
@test_util.run_in_graph_and_eager_modes()
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if context.in_graph_mode():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
def testFirstCallCanCreateVariablesButSecondCanNotWhenBuildEmpty(self):
# Note that this test is only run in Graph mode since with EAGER mode we can
# still create a new variable on second call.
class MyLayer(base_layers.Layer):
def build(self, _):
# Do not mark the layer as built.
pass
def call(self, inputs):
self.my_var = self.add_variable('my_var', [2, 2])
if self.built:
# Skip creating on the first call; try to create after it's
# built. This is expected to fail.
self.add_variable('this_will_break_on_second_call', [2, 2])
return inputs + math_ops.square(self.my_var)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((2,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
self.assertEqual(outputs.op.name, 'my_layer/add')
self.assertEqual([v.name
for v in layer.variables], ['my_layer/my_var:0'])
with self.assertRaisesRegexp(ValueError,
'my_layer/this_will_break_on_second_call'):
layer.apply(inputs)
# The list of variables hasn't changed.
self.assertEqual([v.name
for v in layer.variables], ['my_layer/my_var:0'])
@test_util.run_in_graph_and_eager_modes()
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if context.in_graph_mode():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._graph, layer._graph)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@test_util.run_in_graph_and_eager_modes()
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return inputs
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes()
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if context.in_graph_mode():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes()
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if context.in_graph_mode():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes()
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if context.in_graph_mode():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes()
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes()
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes()
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes()
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if context.in_graph_mode():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
def test_get_updates_for(self):
a = base_layers.Input(shape=(2,))
dense_layer = core_layers.Dense(1)
dense_layer.add_update(0, inputs=a)
dense_layer.add_update(1, inputs=None)
self.assertEqual(dense_layer.get_updates_for(a), [0])
self.assertEqual(dense_layer.get_updates_for(None), [1])
def test_get_losses_for(self):
a = base_layers.Input(shape=(2,))
dense_layer = core_layers.Dense(1)
dense_layer.add_loss(0, inputs=a)
dense_layer.add_loss(1, inputs=None)
self.assertEqual(dense_layer.get_losses_for(a), [0])
self.assertEqual(dense_layer.get_losses_for(None), [1])
def testTopologicalAttributes(self):
# test layer attributes / methods related to cross-layer connectivity.
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
# test input, output, input_shape, output_shape
test_layer = core_layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
# test `get_*_at` methods
dense = core_layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
# Test invalid value for attribute retrieval.
with self.assertRaises(ValueError):
dense.get_input_at(2)
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
_ = new_dense.input
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
_ = new_dense.output
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
_ = new_dense.output_shape
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
a = base_layers.Input(shape=(3, 32))
a = base_layers.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = core_layers.Dense(16)
a = base_layers.Input(shape=(3, 32))
a = base_layers.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.output_shape
def testTopologicalAttributesMultiOutputLayer(self):
class PowersLayer(base_layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = base_layers.Input(shape=(32,))
test_layer = PowersLayer()
p1, p2 = test_layer(x) # pylint: disable=not-callable
self.assertEqual(test_layer.input, x)
self.assertEqual(test_layer.output, [p1, p2])
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
def testTopologicalAttributesMultiInputLayer(self):
class AddLayer(base_layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
a = base_layers.Input(shape=(32,))
b = base_layers.Input(shape=(32,))
test_layer = AddLayer()
y = test_layer([a, b]) # pylint: disable=not-callable
self.assertEqual(test_layer.input, [a, b])
self.assertEqual(test_layer.output, y)
self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
self.assertEqual(test_layer.output_shape, (None, 32))
@test_util.run_in_graph_and_eager_modes()
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes()
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.in_graph_mode():
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
else:
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
class NetworkTest(test.TestCase):
def testBasicNetwork(self):
# minimum viable network
x = base_layers.Input(shape=(32,))
dense = core_layers.Dense(2)
y = dense(x)
network = base_layers.Network(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, dense.trainable_weights)
self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)
# test callability on Input
x_2 = base_layers.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self.assertEqual(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
def test_node_construction(self):
# test graph topology construction basics
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
self.assertEqual(a.get_shape().as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer._inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer._inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertEqual(node.inbound_layers, [])
self.assertEqual(node.input_tensors, [a])
self.assertEqual(node.input_shapes, [(None, 32)])
self.assertEqual(node.output_tensors, [a])
self.assertEqual(node.output_shapes, [(None, 32)])
dense = core_layers.Dense(16, name='dense_1')
dense(a)
dense(b)
self.assertEqual(len(dense._inbound_nodes), 2)
self.assertEqual(len(dense._outbound_nodes), 0)
self.assertEqual(dense._inbound_nodes[0].inbound_layers, [a_layer])
self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
self.assertEqual(dense._inbound_nodes[1].inbound_layers, [b_layer])
self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
self.assertEqual(dense._inbound_nodes[0].input_tensors, [a])
self.assertEqual(dense._inbound_nodes[1].input_tensors, [b])
# Test config
config_0 = dense._inbound_nodes[0].get_config()
self.assertEqual(config_0['outbound_layer'], dense.name)
def testMultiInputNetwork(self):
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
class AddLayer(base_layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
c = AddLayer()([a, b]) # pylint: disable=not-callable
network = base_layers.Network([a, b], c)
self.assertEqual(len(network.layers), 3) # 2 * InputLayer + AddLayer
# Test callability.
a2 = base_layers.Input(shape=(32,))
b2 = base_layers.Input(shape=(32,))
c2 = network([a2, b2])
self.assertEqual(c2.get_shape().as_list(), [None, 32])
def testMultiOutputNetwork(self):
x = base_layers.Input(shape=(32,))
y1 = core_layers.Dense(2)(x)
y2 = core_layers.Dense(3)(x)
network = base_layers.Network(x, [y1, y2])
self.assertEqual(len(network.layers), 3) # InputLayer + 2 * Dense
# Test callability.
x2 = base_layers.Input(shape=(32,))
outputs = network(x2)
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 3])
def testMultiInputMultiOutputNetworkSharedLayer(self):
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
dense = core_layers.Dense(2)
y1 = dense(a)
y2 = dense(b)
network = base_layers.Network([a, b], [y1, y2])
self.assertEqual(len(network.layers), 3) # 2 * InputLayer + Dense
# Test callability.
a2 = base_layers.Input(shape=(32,))
b2 = base_layers.Input(shape=(32,))
outputs = network([a2, b2])
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 2])
def testCrossDataFlows(self):
# Test the ability to have multi-output layers with outputs that get routed
# to separate layers
class PowersLayer(base_layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = base_layers.Input(shape=(32,))
p1, p2 = PowersLayer()(x) # pylint: disable=not-callable
y1 = core_layers.Dense(2)(p1)
y2 = core_layers.Dense(3)(p2)
network = base_layers.Network(x, [y1, y2])
self.assertEqual(len(network.layers), 4) # InputLayer + 2 * Dense + PLayer
# Test callability.
x2 = base_layers.Input(shape=(32,))
outputs = network(x2)
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 3])
def testNetworkAttributes(self):
x = base_layers.Input(shape=(32,))
z = core_layers.Dense(2, kernel_regularizer=lambda x: 0.01 * (x**2))(x)
dense = core_layers.Dense(2, name='dense')
dense.add_update(1)
y = dense(z)
net = base_layers.Network(x, y)
# losses
self.assertEqual(len(net.losses), 1)
# updates
self.assertEqual(len(net.updates), 1)
# get_layer
self.assertEqual(net.get_layer('dense'), dense)
self.assertEqual(net.get_layer(index=2), dense)
with self.assertRaises(ValueError):
net.get_layer('dense_unknown')
with self.assertRaises(ValueError):
net.get_layer()
with self.assertRaises(ValueError):
net.get_layer(index=4)
# input, output
self.assertEqual(net.input, x)
self.assertEqual(net.output, y)
# input_shape, output_shape
self.assertEqual(net.input_shape, (None, 32))
self.assertEqual(net.output_shape, (None, 2))
# get_*_at
self.assertEqual(net.get_input_at(0), x)
self.assertEqual(net.get_output_at(0), y)
# _compute_output_shape
self.assertEqual(net._compute_output_shape((3, 32)).as_list(), [3, 2])
def testInvalidNetworks(self):
# redundant inputs
x = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network([x, x], y)
# inputs that don't come from Input
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# inputs that don't come from Input but have a layer history
x = base_layers.Input(shape=(32,))
x = core_layers.Dense(32)(x)
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# outputs that don't come from layers
x = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x)
y = 2 * y
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# disconnected graphs
x1 = base_layers.Input(shape=(32,))
x2 = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x1)
with self.assertRaises(ValueError):
base_layers.Network(x2, y)
# redundant layer names
x = base_layers.Input(shape=(32,))
z = core_layers.Dense(2, name='dense')(x)
y = core_layers.Dense(2, name='dense')(z)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
def testInputTensorWrapping(self):
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
x = base_layers.Input(tensor=x)
y = core_layers.Dense(2)(x)
base_layers.Network(x, y)
def testExplicitBatchSize(self):
x = base_layers.Input(shape=(32,), batch_size=3)
y = core_layers.Dense(2)(x)
self.assertEqual(y.get_shape().as_list(), [3, 2])
def testNetworkRecursion(self):
# test the ability of networks to be used as layers inside networks.
a = base_layers.Input(shape=(32,))
b = core_layers.Dense(2)(a)
net = base_layers.Network(a, b)
c = base_layers.Input(shape=(32,))
d = net(c)
recursive_net = base_layers.Network(c, d)
self.assertEqual(len(recursive_net.layers), 2)
self.assertEqual(recursive_net.layers[1], net)
self.assertEqual(len(recursive_net.weights), 2)
# test callability
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
y = recursive_net(x)
self.assertEqual(y.get_shape().as_list(), [None, 2])
def testSparseInput(self):
class SparseSoftmax(base_layers.Layer):
def call(self, inputs):
return sparse_ops.sparse_softmax(inputs)
x = base_layers.Input(shape=(32,), sparse=True)
y = SparseSoftmax()(x) # pylint: disable=not-callable
network = base_layers.Network(x, y)
self.assertEqual(len(network.layers), 2)
self.assertEqual(network.layers[0].sparse, True)
@test_util.run_in_graph_and_eager_modes()
def testMaskingSingleInput(self):
class MaskedLayer(base_layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
if context.in_graph_mode():
x = base_layers.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = base_layers.Network(x, y)
# test callability on Input
x_2 = base_layers.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
else:
a = constant_op.constant([2] * 32)
mask = constant_op.constant([0, 1] * 16)
a._keras_mask = mask
b = MaskedLayer().apply(a)
self.assertTrue(hasattr(b, '_keras_mask'))
self.assertAllEqual(self.evaluate(array_ops.ones_like(mask)),
self.evaluate(getattr(b, '_keras_mask')))
self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
class DeferredModeTest(test.TestCase):
def testDeferredTensorAttributes(self):
x = base_layers._DeferredTensor(shape=(None, 2), dtype='float32', name='x')
self.assertEqual(str(x),
'DeferredTensor(\'x\', shape=(?, 2), dtype=float32)')
self.assertEqual(repr(x),
'<_DeferredTensor \'x\' shape=(?, 2) dtype=float32>')
@test_util.run_in_graph_and_eager_modes()
def testSimpleNetworkBuilding(self):
inputs = base_layers.Input(shape=(32,))
if context.in_eager_mode():
self.assertIsInstance(inputs, base_layers._DeferredTensor)
self.assertEqual(inputs.dtype.name, 'float32')
self.assertEqual(inputs.shape.as_list(), [None, 32])
x = core_layers.Dense(2)(inputs)
if context.in_eager_mode():
self.assertIsInstance(x, base_layers._DeferredTensor)
self.assertEqual(x.dtype.name, 'float32')
self.assertEqual(x.shape.as_list(), [None, 2])
outputs = core_layers.Dense(4)(x)
network = base_layers.Network(inputs, outputs)
self.assertIsInstance(network, base_layers.Network)
if context.in_eager_mode():
# It should be possible to call such a network on EagerTensors.
inputs = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
@test_util.run_in_graph_and_eager_modes()
def testMultiIONetworkbuilding(self):
input_a = base_layers.Input(shape=(32,))
input_b = base_layers.Input(shape=(16,))
a = core_layers.Dense(16)(input_a)
class AddLayer(base_layers.Layer):
def call(self, inputs):
return inputs[0] + inputs[1]
def _compute_output_shape(self, input_shape):
return input_shape[0]
c = AddLayer()([a, input_b]) # pylint: disable=not-callable
c = core_layers.Dense(2)(c)
network = base_layers.Network([input_a, input_b], [a, c])
if context.in_eager_mode():
a_val = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
b_val = constant_op.constant(
np.random.random((10, 16)).astype('float32'))
outputs = network([a_val, b_val])
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [10, 16])
self.assertEqual(outputs[1].shape.as_list(), [10, 2])
if __name__ == '__main__':
test.main()
| ychfan/tensorflow | tensorflow/python/layers/base_test.py | Python | apache-2.0 | 33,604 |
import json
import logging
import requests
from appr.client import ApprClient
from appr.auth import ApprAuth
import kpm
__all__ = ['Registry']
logger = logging.getLogger(__name__)
DEFAULT_REGISTRY = 'http://localhost:5000'
API_PREFIX = '/api/v1'
DEFAULT_PREFIX = "/cnr"
class Registry(ApprClient):
def __init__(self, endpoint=DEFAULT_REGISTRY):
super(Registry, self).__init__(endpoint)
self._headers = {
'Content-Type': 'application/json',
'User-Agent': "kpmpy-cli/%s" % kpm.__version__
}
self.host = self.endpoint.geturl()
self.auth = ApprAuth(".appr")
    def generate(self, name, namespace=None, variables=None, version=None, shards=None):
        path = "/api/v1/packages/%s/generate" % name
        params = {}
        body = {}
        # Avoid the mutable-default-argument pitfall: fall back to a fresh dict.
        body['variables'] = variables if variables is not None else {}
if namespace:
params['namespace'] = namespace
if shards:
body['shards'] = shards
if version:
params['version'] = version
r = requests.get(
self._url(path), data=json.dumps(body), params=params, headers=self.headers)
r.raise_for_status()
return r.json()
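# Usage sketch (illustrative; the registry URL, package name and variables
# below are hypothetical):
#
#   registry = Registry("https://registry.example.com")
#   manifest = registry.generate('myns/mypkg', namespace='default',
#                                variables={'replicas': 3}, version='1.0.0')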
| kubespray/kpm | kpm/registry.py | Python | apache-2.0 | 1,196 |
# vec2d.py -- written by Andrej Karpathy
import operator
import math
class vec2d(object):
"""2d vector class, supports vector and scalar operators,
and also provides a bunch of high level functions
"""
__slots__ = ['x', 'y']
def __init__(self, x_or_pair, y = None):
        if y is None:
self.x = x_or_pair[0]
self.y = x_or_pair[1]
else:
self.x = x_or_pair
self.y = y
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("Invalid subscript "+str(key)+" to vec2d")
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise IndexError("Invalid subscript "+str(key)+" to vec2d")
# String representaion (for debugging)
def __repr__(self):
return 'vec2d(%s, %s)' % (self.x, self.y)
# Comparison
def __eq__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x == other[0] and self.y == other[1]
else:
return False
def __ne__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x != other[0] or self.y != other[1]
else:
return True
def __nonzero__(self):
return self.x or self.y
# Generic operator handlers
def _o2(self, other, f):
"Any two-operator operation where the left operand is a vec2d"
if isinstance(other, vec2d):
return vec2d(f(self.x, other.x),
f(self.y, other.y))
elif (hasattr(other, "__getitem__")):
return vec2d(f(self.x, other[0]),
f(self.y, other[1]))
else:
return vec2d(f(self.x, other),
f(self.y, other))
def _r_o2(self, other, f):
"Any two-operator operation where the right operand is a vec2d"
if (hasattr(other, "__getitem__")):
return vec2d(f(other[0], self.x),
f(other[1], self.y))
else:
return vec2d(f(other, self.x),
f(other, self.y))
def _io(self, other, f):
"inplace operator"
if (hasattr(other, "__getitem__")):
self.x = f(self.x, other[0])
self.y = f(self.y, other[1])
else:
self.x = f(self.x, other)
self.y = f(self.y, other)
return self
# Addition
def __add__(self, other):
if isinstance(other, vec2d):
return vec2d(self.x + other.x, self.y + other.y)
elif hasattr(other, "__getitem__"):
return vec2d(self.x + other[0], self.y + other[1])
else:
return vec2d(self.x + other, self.y + other)
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, vec2d):
self.x += other.x
self.y += other.y
elif hasattr(other, "__getitem__"):
self.x += other[0]
self.y += other[1]
else:
self.x += other
self.y += other
return self
# Subtraction
def __sub__(self, other):
if isinstance(other, vec2d):
return vec2d(self.x - other.x, self.y - other.y)
elif (hasattr(other, "__getitem__")):
return vec2d(self.x - other[0], self.y - other[1])
else:
return vec2d(self.x - other, self.y - other)
def __rsub__(self, other):
if isinstance(other, vec2d):
return vec2d(other.x - self.x, other.y - self.y)
if (hasattr(other, "__getitem__")):
return vec2d(other[0] - self.x, other[1] - self.y)
else:
return vec2d(other - self.x, other - self.y)
def __isub__(self, other):
if isinstance(other, vec2d):
self.x -= other.x
self.y -= other.y
elif (hasattr(other, "__getitem__")):
self.x -= other[0]
self.y -= other[1]
else:
self.x -= other
self.y -= other
return self
# Multiplication
def __mul__(self, other):
if isinstance(other, vec2d):
            return vec2d(self.x*other.x, self.y*other.y)
if (hasattr(other, "__getitem__")):
return vec2d(self.x*other[0], self.y*other[1])
else:
return vec2d(self.x*other, self.y*other)
__rmul__ = __mul__
def __imul__(self, other):
if isinstance(other, vec2d):
self.x *= other.x
self.y *= other.y
elif (hasattr(other, "__getitem__")):
self.x *= other[0]
self.y *= other[1]
else:
self.x *= other
self.y *= other
return self
# Division
def __div__(self, other):
return self._o2(other, operator.div)
def __rdiv__(self, other):
return self._r_o2(other, operator.div)
def __idiv__(self, other):
return self._io(other, operator.div)
def __floordiv__(self, other):
return self._o2(other, operator.floordiv)
def __rfloordiv__(self, other):
return self._r_o2(other, operator.floordiv)
def __ifloordiv__(self, other):
return self._io(other, operator.floordiv)
def __truediv__(self, other):
return self._o2(other, operator.truediv)
def __rtruediv__(self, other):
return self._r_o2(other, operator.truediv)
def __itruediv__(self, other):
        return self._io(other, operator.truediv)
# Modulo
def __mod__(self, other):
return self._o2(other, operator.mod)
def __rmod__(self, other):
return self._r_o2(other, operator.mod)
def __divmod__(self, other):
return self._o2(other, operator.divmod)
def __rdivmod__(self, other):
return self._r_o2(other, operator.divmod)
# Exponentation
def __pow__(self, other):
return self._o2(other, operator.pow)
def __rpow__(self, other):
return self._r_o2(other, operator.pow)
# Bitwise operators
def __lshift__(self, other):
return self._o2(other, operator.lshift)
def __rlshift__(self, other):
return self._r_o2(other, operator.lshift)
def __rshift__(self, other):
return self._o2(other, operator.rshift)
def __rrshift__(self, other):
return self._r_o2(other, operator.rshift)
def __and__(self, other):
return self._o2(other, operator.and_)
__rand__ = __and__
def __or__(self, other):
return self._o2(other, operator.or_)
__ror__ = __or__
def __xor__(self, other):
return self._o2(other, operator.xor)
__rxor__ = __xor__
# Unary operations
def __neg__(self):
return vec2d(operator.neg(self.x), operator.neg(self.y))
def __pos__(self):
return vec2d(operator.pos(self.x), operator.pos(self.y))
def __abs__(self):
return vec2d(abs(self.x), abs(self.y))
def __invert__(self):
return vec2d(-self.x, -self.y)
# vectory functions
def get_length_sqrd(self):
return self.x**2 + self.y**2
def get_length(self):
return math.sqrt(self.x**2 + self.y**2)
def __setlength(self, value):
length = self.get_length()
self.x *= value/length
self.y *= value/length
length = property(get_length, __setlength, None, "gets or sets the magnitude of the vector")
def rotate(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x*cos - self.y*sin
y = self.x*sin + self.y*cos
self.x = x
self.y = y
def rotated(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x*cos - self.y*sin
y = self.x*sin + self.y*cos
return vec2d(x, y)
def get_angle(self):
if (self.get_length_sqrd() == 0):
return 0
return math.degrees(math.atan2(self.y, self.x))
def __setangle(self, angle_degrees):
self.x = self.length
self.y = 0
self.rotate(angle_degrees)
angle = property(get_angle, __setangle, None, "gets or sets the angle of a vector")
def get_angle_between(self, other):
cross = self.x*other[1] - self.y*other[0]
dot = self.x*other[0] + self.y*other[1]
return math.degrees(math.atan2(cross, dot))
def normalized(self):
length = self.length
if length != 0:
return self/length
return vec2d(self)
def normalize_return_length(self):
length = self.length
if length != 0:
self.x /= length
self.y /= length
return length
def perpendicular(self):
return vec2d(-self.y, self.x)
def perpendicular_normal(self):
length = self.length
if length != 0:
return vec2d(-self.y/length, self.x/length)
return vec2d(self)
def dot(self, other):
return float(self.x*other[0] + self.y*other[1])
def get_distance(self, other):
return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)
def get_dist_sqrd(self, other):
return (self.x - other[0])**2 + (self.y - other[1])**2
def projection(self, other):
other_length_sqrd = other[0]*other[0] + other[1]*other[1]
projected_length_times_other_length = self.dot(other)
return other*(projected_length_times_other_length/other_length_sqrd)
def cross(self, other):
return self.x*other[1] - self.y*other[0]
def interpolate_to(self, other, range):
return vec2d(self.x + (other[0] - self.x)*range, self.y + (other[1] - self.y)*range)
def convert_to_basis(self, x_vector, y_vector):
return vec2d(self.dot(x_vector)/x_vector.get_length_sqrd(), self.dot(y_vector)/y_vector.get_length_sqrd())
def __getstate__(self):
return [self.x, self.y]
def __setstate__(self, dict):
self.x, self.y = dict
def tup(self):
return (self.x,self.y)
def inttup(self):
return (int(self.x), int(self.y))
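# Quick demonstration of the API above (added for illustration; not part
# of the original module):
if __name__ == '__main__':
    a = vec2d(3, 4)
    b = vec2d(1, 0)
    assert a.length == 5.0                     # magnitude via the `length` property
    assert a.dot(b) == 3.0                     # dot product
    assert b.rotated(90).inttup() == (0, 1)    # rotated() returns a new vector
    print(a + b, a - b, a.get_angle_between(b))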
| AjayMT/game-of-life | vec2d.py | Python | mit | 10,362 |
import copy
import logging
import platform
import os
from termcolor import colored
try:
# This import has side effects and is needed to make input() behave nicely
import readline # pylint: disable=unused-import
except ImportError: # pragma: no cover
pass
from coalib.misc.DictUtilities import inverse_dicts
from coalib.misc.Exceptions import log_exception
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.results.result_actions.OpenEditorAction import OpenEditorAction
from coalib.results.result_actions.IgnoreResultAction import IgnoreResultAction
from coalib.results.result_actions.DoNothingAction import DoNothingAction
from coalib.results.result_actions.GeneratePatchesAction import (
GeneratePatchesAction)
from coalib.results.result_actions.ShowAppliedPatchesAction import (
ShowAppliedPatchesAction)
from coalib.results.result_actions.PrintDebugMessageAction import (
PrintDebugMessageAction)
from coalib.results.result_actions.PrintMoreInfoAction import (
PrintMoreInfoAction)
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.results.RESULT_SEVERITY import (
RESULT_SEVERITY, RESULT_SEVERITY_COLORS)
from coalib.settings.Setting import Setting
from coala_utils.string_processing.Core import join_names
from pygments import highlight
from pygments.formatters import (TerminalTrueColorFormatter,
TerminalFormatter)
from pygments.filters import VisibleWhitespaceFilter
from pygments.lexers import TextLexer, get_lexer_for_filename
from pygments.style import Style
from pygments.token import Token
from pygments.util import ClassNotFound
class BackgroundSourceRangeStyle(Style):
styles = {
Token: 'bold bg:#BB4D3E #111'
}
class BackgroundMessageStyle(Style):
styles = {
Token: 'bold bg:#eee #111'
}
class NoColorStyle(Style):
styles = {
Token: 'noinherit'
}
def highlight_text(no_color, text, style, lexer=TextLexer()):
    # Only build the formatter we actually use.
    if no_color:
        formatter = TerminalTrueColorFormatter(style=NoColorStyle)
    else:
        formatter = TerminalTrueColorFormatter(style=style)
    return highlight(text, lexer, formatter)[:-1]
STR_GET_VAL_FOR_SETTING = ('Please enter a value for the setting \"{}\" ({}) '
'needed by {} for section \"{}\": ')
STR_LINE_DOESNT_EXIST = ('The line belonging to the following result '
'cannot be printed because it refers to a line '
"that doesn't seem to exist in the given file.")
STR_PROJECT_WIDE = 'Project wide:'
STR_ENTER_NUMBER = 'Enter number (Ctrl-{} to exit): '.format(
'Z' if platform.system() == 'Windows' else 'D')
FILE_NAME_COLOR = 'blue'
FILE_LINES_COLOR = 'blue'
CAPABILITY_COLOR = 'green'
HIGHLIGHTED_CODE_COLOR = 'red'
SUCCESS_COLOR = 'green'
REQUIRED_SETTINGS_COLOR = 'green'
CLI_ACTIONS = (OpenEditorAction(),
ApplyPatchAction(),
PrintDebugMessageAction(),
PrintMoreInfoAction(),
ShowPatchAction(),
IgnoreResultAction(),
ShowAppliedPatchesAction(),
GeneratePatchesAction())
DIFF_EXCERPT_MAX_SIZE = 4
def color_letter(console_printer, line):
x = -1
y = -1
letter = ''
    for i, l in enumerate(line):
        if l == '(':
            x = i
        if l == ')':
            y = i
        if l.isupper() and x != -1:
            letter = l
first_part = line[:x+1]
second_part = line[y:]
console_printer.print(first_part, end='')
console_printer.print(letter, color='blue', end='')
console_printer.print(second_part)
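# For example (illustrative), color_letter(printer, 'Do (N)othing') prints
# 'Do (' and ')othing' with the default color and the capital 'N' in blue.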
def format_lines(lines, symbol='', line_nr=''):
    def sym(x): return ']' if x == '[' else x
return '\n'.join('{}{:>4}{} {}'.format(symbol, line_nr, sym(symbol), line)
for line in lines.rstrip('\n').split('\n'))
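# For example (illustrative):
#   format_lines('foo', symbol='[', line_nr=5)  returns  '[   5] foo'
#   format_lines('a\nb', symbol='!')            returns  '!    ! a\n!    ! b'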
def print_section_beginning(console_printer, section):
"""
    Will be called after the current section has been initialized in
    begin_section().
:param console_printer: Object to print messages on the console.
:param section: The section that will get executed now.
"""
console_printer.print('Executing section {name}...'.format(
name=section.name))
def nothing_done(log_printer=None):
"""
Will be called after processing a coafile when nothing had to be done,
i.e. no section was enabled/targeted.
:param log_printer: A LogPrinter object.
"""
logging.warning('No existent section was targeted or enabled. Nothing to '
'do.')
def acquire_actions_and_apply(console_printer,
section,
file_diff_dict,
result,
file_dict,
cli_actions=None,
apply_single=False):
"""
Acquires applicable actions and applies them.
:param console_printer: Object to print messages on the console.
:param section: Name of section to which the result belongs.
:param file_diff_dict: Dictionary containing filenames as keys and Diff
objects as values.
:param result: A derivative of Result.
:param file_dict: A dictionary containing all files with filename as
key.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param cli_actions: The list of cli actions available.
"""
cli_actions = CLI_ACTIONS if cli_actions is None else cli_actions
failed_actions = set()
applied_actions = {}
while True:
actions = []
for action in cli_actions:
if action.is_applicable(result, file_dict, file_diff_dict) is True:
actions.append(action)
if actions == []:
return
action_dict = {}
metadata_list = []
for action in actions:
metadata = action.get_metadata()
action_dict[metadata.name] = action
metadata_list.append(metadata)
# User can always choose no action which is guaranteed to succeed
if apply_single:
ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=apply_single)
break
elif not ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=apply_single):
break
def print_lines(console_printer,
file_dict,
sourcerange):
"""
Prints the lines between the current and the result line. If needed
they will be shortened.
:param console_printer: Object to print messages on the console.
:param file_dict: A dictionary containing all files as values with
filenames as key.
:param sourcerange: The SourceRange object referring to the related
lines to print.
"""
no_color = not console_printer.print_colored
for i in range(sourcerange.start.line, sourcerange.end.line + 1):
# Print affected file's line number in the sidebar.
console_printer.print(format_lines(lines='', line_nr=i, symbol='['),
color=FILE_LINES_COLOR,
end='')
line = file_dict[sourcerange.file][i - 1].rstrip('\n')
try:
lexer = get_lexer_for_filename(sourcerange.file)
except ClassNotFound:
lexer = TextLexer()
lexer.add_filter(VisibleWhitespaceFilter(
spaces=True, tabs=True,
tabsize=SpacingHelper.DEFAULT_TAB_WIDTH))
# highlight() combines lexer and formatter to output a ``str``
# object.
printed_chars = 0
if i == sourcerange.start.line and sourcerange.start.column:
console_printer.print(highlight_text(
no_color, line[:sourcerange.start.column - 1],
BackgroundMessageStyle, lexer), end='')
printed_chars = sourcerange.start.column - 1
if i == sourcerange.end.line and sourcerange.end.column:
console_printer.print(highlight_text(
no_color, line[printed_chars:sourcerange.end.column - 1],
BackgroundSourceRangeStyle, lexer), end='')
console_printer.print(highlight_text(
no_color, line[sourcerange.end.column - 1:],
BackgroundSourceRangeStyle, lexer), end='')
console_printer.print('')
else:
console_printer.print(highlight_text(
no_color, line[printed_chars:], BackgroundMessageStyle, lexer),
end='')
console_printer.print('')
def print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
interactive=True,
apply_single=False):
"""
Prints the result to console.
:param console_printer: Object to print messages on the console.
:param section: Name of section to which the result belongs.
:param file_diff_dict: Dictionary containing filenames as keys and Diff
objects as values.
:param result: A derivative of Result.
:param file_dict: A dictionary containing all files with filename as
key.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param interactive: Variable to check whether or not to
offer the user actions interactively.
"""
no_color = not console_printer.print_colored
if not isinstance(result, Result):
logging.warning('One of the results can not be printed since it is '
'not a valid derivative of the coala result '
'class.')
return
if hasattr(section, 'name'):
console_printer.print('**** {bear} [Section: {section} | Severity: '
'{severity}] ****'
.format(bear=result.origin,
section=section.name,
severity=RESULT_SEVERITY.__str__(
result.severity)),
color=RESULT_SEVERITY_COLORS[result.severity])
else:
console_printer.print('**** {bear} [Section {section} | Severity '
'{severity}] ****'
.format(bear=result.origin, section='<empty>',
severity=RESULT_SEVERITY.__str__(
result.severity)),
color=RESULT_SEVERITY_COLORS[result.severity])
lexer = TextLexer()
result.message = highlight_text(no_color, result.message,
BackgroundMessageStyle, lexer)
console_printer.print(format_lines(result.message, symbol='!'))
if interactive:
cli_actions = CLI_ACTIONS
show_patch_action = ShowPatchAction()
if show_patch_action.is_applicable(
result, file_dict, file_diff_dict) is True:
diff_size = sum(len(diff) for diff in result.diffs.values())
if diff_size <= DIFF_EXCERPT_MAX_SIZE:
show_patch_action.apply_from_section(result,
file_dict,
file_diff_dict,
section)
cli_actions = tuple(action for action in cli_actions
if not isinstance(action, ShowPatchAction))
else:
print_diffs_info(result.diffs, console_printer)
acquire_actions_and_apply(console_printer,
section,
file_diff_dict,
result,
file_dict,
cli_actions,
apply_single=apply_single)
def print_diffs_info(diffs, printer):
"""
Prints diffs information (number of additions and deletions) to the console.
:param diffs: List of Diff objects containing corresponding diff info.
:param printer: Object responsible for printing diffs on console.
"""
for filename, diff in sorted(diffs.items()):
additions, deletions = diff.stats()
printer.print(
format_lines('+{additions} -{deletions} in {file}'.format(
file=filename,
additions=additions,
deletions=deletions), '!'),
color='green')
def print_results_formatted(log_printer,
section,
result_list,
file_dict,
*args):
"""
Prints results through the format string from the format setting done by
user.
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong.
:param result_list: List of Result objects containing the corresponding
results.
"""
default_format = ('id:{id}:origin:{origin}:file:{file}:line:{line}:'
'column:{column}:end_line:{end_line}:end_column:'
'{end_column}:severity:{severity}:severity_str:'
'{severity_str}:message:{message}')
format_str = str(section.get('format', default_format))
if format_str == 'True':
format_str = default_format
for result in result_list:
severity_str = RESULT_SEVERITY.__str__(result.severity)
format_args = vars(result)
try:
if len(result.affected_code) == 0:
format_args['affected_code'] = None
print(format_str.format(file=None,
line=None,
end_line=None,
column=None,
end_column=None,
severity_str=severity_str,
message=result.message,
**format_args))
continue
for range in result.affected_code:
format_args['affected_code'] = range
format_args['source_lines'] = range.affected_source(file_dict)
print(format_str.format(file=range.start.file,
line=range.start.line,
end_line=range.end.line,
column=range.start.column,
end_column=range.end.column,
severity_str=severity_str,
message=result.message,
**format_args))
except KeyError as exception:
log_exception(
'Unable to print the result with the given format string.',
exception)
def print_bears_formatted(bears, format=None):
format_str = format or ('name:{name}:can_detect:{can_detect}:'
'can_fix:{can_fix}:description:{description}')
print('\n\n'.join(format_str.format(name=bear.name,
can_detect=bear.CAN_DETECT,
can_fix=bear.CAN_FIX,
description=bear.get_metadata().desc)
for bear in bears))
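# For example (illustrative; the bear name and capability sets below are
# hypothetical), the default format string renders one bear roughly as:
#   name:PEP8Bear:can_detect:{'Formatting'}:can_fix:{'Formatting'}:description:...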
def print_affected_files(console_printer,
log_printer,
result,
file_dict):
"""
Prints all the affected files and affected lines within them.
:param console_printer: Object to print messages on the console.
:param log_printer: Printer responsible for logging the messages.
:param result: The result to print the context for.
:param file_dict: A dictionary containing all files with filename as
key.
"""
if len(result.affected_code) == 0:
console_printer.print('\n' + STR_PROJECT_WIDE,
color=FILE_NAME_COLOR)
else:
for sourcerange in result.affected_code:
if (
sourcerange.file is not None and
sourcerange.file not in file_dict):
logging.warning('The context for the result ({}) cannot '
'be printed because it refers to a file '
"that doesn't seem to exist ({})"
'.'.format(result, sourcerange.file))
else:
print_affected_lines(console_printer,
file_dict,
sourcerange)
def print_results_no_input(log_printer,
section,
result_list,
file_dict,
file_diff_dict,
console_printer,
apply_single=False):
"""
    Prints all non-interactive results in a section.
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong to.
:param result_list: List containing the results
:param file_dict: A dictionary containing all files with filename as
key.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param console_printer: Object to print messages on the console.
"""
for result in result_list:
print_affected_files(console_printer,
None,
result,
file_dict)
print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
interactive=False,
apply_single=apply_single)
def print_results(log_printer,
section,
result_list,
file_dict,
file_diff_dict,
console_printer,
apply_single=False):
"""
Prints all the results in a section.
:param log_printer: Printer responsible for logging the messages.
:param section: The section to which the results belong to.
:param result_list: List containing the results
:param file_dict: A dictionary containing all files with filename as
key.
:param file_diff_dict: A dictionary that contains filenames as keys and
diff objects as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param console_printer: Object to print messages on the console.
"""
for result in sorted(result_list):
print_affected_files(console_printer,
None,
result,
file_dict)
print_result(console_printer,
section,
file_diff_dict,
result,
file_dict,
apply_single=apply_single)
def print_affected_lines(console_printer, file_dict, sourcerange):
"""
Prints the lines affected by the bears.
:param console_printer: Object to print messages on the console.
:param file_dict: A dictionary containing all files with filename
as key.
:param sourcerange: The SourceRange object referring to the related
lines to print.
"""
console_printer.print('\n' + os.path.relpath(sourcerange.file),
color=FILE_NAME_COLOR)
if sourcerange.start.line is not None:
if len(file_dict[sourcerange.file]) < sourcerange.end.line:
console_printer.print(format_lines(lines=STR_LINE_DOESNT_EXIST,
line_nr=sourcerange.end.line,
symbol='!'))
else:
print_lines(console_printer,
file_dict,
sourcerange)
def require_setting(setting_name, arr, section):
"""
This method is responsible for prompting a user about a missing setting and
taking its value as input from the user.
:param setting_name: Name of the setting missing
:param arr: A list containing a description in [0] and the name
of the bears who need this setting in [1] and
following.
:param section: The section the action corresponds to.
"""
needed = join_names(arr[1:])
# Don't use input, it can't deal with escapes!
print(colored(STR_GET_VAL_FOR_SETTING.format(setting_name, arr[0], needed,
section.name),
REQUIRED_SETTINGS_COLOR))
return input()
def acquire_settings(log_printer, settings_names_dict, section):
"""
This method prompts the user for the given settings.
:param log_printer:
Printer responsible for logging the messages. This is needed to comply
with the interface.
:param settings_names_dict:
A dictionary with the settings name as key and a list containing a
description in [0] and the name of the bears who need this setting in
[1] and following.
Example:
::
{"UseTabs": ["describes whether tabs should be used instead of spaces",
"SpaceConsistencyBear",
"SomeOtherBear"]}
:param section:
The section the action corresponds to.
:return:
A dictionary with the settings name as key and the given value as
value.
"""
if not isinstance(settings_names_dict, dict):
raise TypeError('The settings_names_dict parameter has to be a '
'dictionary.')
result = {}
for setting_name, arr in sorted(settings_names_dict.items(),
key=lambda x: (join_names(x[1][1:]), x[0])):
value = require_setting(setting_name, arr, section)
result.update({setting_name: value} if value is not None else {})
return result
def get_action_info(section, action, failed_actions):
"""
Gets all the required Settings for an action. It updates the section with
the Settings.
:param section: The section the action corresponds to.
:param action: The action to get the info for.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:return: Action name and the updated section.
"""
params = action.non_optional_params
for param_name in params:
if param_name not in section or action.name in failed_actions:
question = format_lines(
"Please enter a value for the parameter '{}' ({}): "
.format(param_name, params[param_name][0]), symbol='!')
section.append(Setting(param_name, input(question)))
return action.name, section
def choose_action(console_printer, actions, apply_single=False):
"""
Presents the actions available to the user and takes as input the action
the user wants to choose.
:param console_printer: Object to print messages on the console.
:param actions: Actions available to the user.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
    :return:               A tuple of two lists: the descriptions of the
                           actions to apply and the names of those actions.
"""
actions.insert(0, DoNothingAction().get_metadata())
actions_desc = []
actions_name = []
if apply_single:
for i, action in enumerate(actions, 0):
if apply_single == action.desc:
return ([action.desc], [action.name])
return (['Do (N)othing'], ['Do (N)othing'])
else:
while True:
for i, action in enumerate(actions, 0):
output = '{:>2}. {}' if i != 0 else '*{}. {}'
color_letter(console_printer, format_lines(output.format(
i, action.desc), symbol='['))
line = format_lines(STR_ENTER_NUMBER, symbol='[')
choice = input(line)
choice = str(choice)
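            # Each character of the input counts as one selection: digits
            # pick an action by its menu index, while letters match the
            # "(X)" hotkey embedded in the action description.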
for c in choice:
c = str(c)
actions_desc_len = len(actions_desc)
if c.isnumeric():
for i, action in enumerate(actions, 0):
c = int(c)
if i == c:
actions_desc.append(action.desc)
actions_name.append(action.name)
break
elif c.isalpha():
c = c.upper()
c = '(' + c + ')'
for i, action in enumerate(actions, 1):
if c in action.desc:
actions_desc.append(action.desc)
actions_name.append(action.name)
break
if actions_desc_len == len(actions_desc):
console_printer.print(format_lines(
'Please enter a valid letter.', symbol='['))
if not choice:
actions_desc.append(DoNothingAction().get_metadata().desc)
actions_name.append(DoNothingAction().get_metadata().name)
return (actions_desc, actions_name)
def try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions):
"""
Try to apply the given action.
:param action_name: The name of the action.
    :param chosen_action:   The action object that will be applied.
:param console_printer: Object to print messages on the console.
:param section: Currently active section.
:param metadata_list: Contains metadata for all the actions.
:param action_dict: Contains the action names as keys and their
references as values.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:param result: Result corresponding to the actions.
:param file_diff_dict: If it is an action which applies a patch, this
contains the diff of the patch to be applied to
the file with filename as keys.
:param applied_actions: A dictionary that contains the result, file_dict,
file_diff_dict and the section for an action.
:param file_dict: Dictionary with filename as keys and its contents
as values.
"""
try:
chosen_action.apply_from_section(result,
file_dict,
file_diff_dict,
section)
console_printer.print(
format_lines(chosen_action.SUCCESS_MESSAGE, symbol='['),
color=SUCCESS_COLOR)
applied_actions[action_name] = [copy.copy(result), copy.copy(
file_dict),
copy.copy(file_diff_dict),
copy.copy(section)]
result.set_applied_actions(applied_actions)
failed_actions.discard(action_name)
except Exception as exception: # pylint: disable=broad-except
logging.error('Failed to execute the action {} with error: {}.'
.format(action_name, exception))
failed_actions.add(action_name)
def ask_for_action_and_apply(console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions,
apply_single=False):
"""
Asks the user for an action and applies it.
:param console_printer: Object to print messages on the console.
:param section: Currently active section.
:param metadata_list: Contains metadata for all the actions.
:param action_dict: Contains the action names as keys and their
references as values.
:param failed_actions: A set of all actions that have failed. A failed
action remains in the list until it is successfully
executed.
:param result: Result corresponding to the actions.
:param file_diff_dict: If it is an action which applies a patch, this
contains the diff of the patch to be applied to
the file with filename as keys.
:param file_dict: Dictionary with filename as keys and its contents
as values.
:param apply_single: The action that should be applied for all results.
If it's not selected, has a value of False.
:param applied_actions: A dictionary that contains the result, file_dict,
file_diff_dict and the section for an action.
:return: Returns a boolean value. True will be returned, if
it makes sense that the user may choose to execute
another action, False otherwise.
"""
actions_desc, actions_name = choose_action(console_printer, metadata_list,
apply_single)
if apply_single:
if apply_single == 'Do (N)othing':
return False
for index, action_details in enumerate(metadata_list, 1):
if apply_single == action_details.desc:
action_name, section = get_action_info(
section, metadata_list[index - 1], failed_actions)
chosen_action = action_dict[action_details.name]
try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions)
else:
for action_choice, action_choice_name in zip(actions_desc,
actions_name):
if action_choice == 'Do (N)othing':
return False
chosen_action = action_dict[action_choice_name]
action_choice_made = action_choice
for index, action_details in enumerate(metadata_list, 1):
if action_choice_made in action_details.desc:
action_name, section = get_action_info(
section, metadata_list[index-1], failed_actions)
try_to_apply_action(action_name,
chosen_action,
console_printer,
section,
metadata_list,
action_dict,
failed_actions,
result,
file_diff_dict,
file_dict,
applied_actions)
return True
def show_enumeration(console_printer,
title,
items,
indentation,
no_items_text):
"""
    This function takes as input an iterable object (preferably a list or
    a dict) and prints it in a stylized format. If the iterable object is
    empty, it prints a specific statement given by the user. For example:
<indentation>Title:
<indentation> * Item 1
<indentation> * Item 2
:param console_printer: Object to print messages on the console.
:param title: Title of the text to be printed
:param items: The iterable object.
:param indentation: Number of spaces to indent every line by.
:param no_items_text: Text printed when iterable object is empty.
"""
if not items:
console_printer.print(indentation + no_items_text)
else:
console_printer.print(indentation + title)
if isinstance(items, dict):
for key, value in items.items():
console_printer.print(indentation + ' * ' + key + ': ' +
value[0])
else:
for item in items:
console_printer.print(indentation + ' * ' + item)
console_printer.print()
def show_bear(bear,
show_description,
show_params,
console_printer):
"""
Displays all information about a bear.
:param bear: The bear to be displayed.
:param show_description: True if the main description should be shown.
:param show_params: True if the details should be shown.
:param console_printer: Object to print messages on the console.
"""
console_printer.print(bear.name, color='blue')
if not show_description and not show_params:
return
metadata = bear.get_metadata()
if show_description:
console_printer.print(
' ' + metadata.desc.replace('\n', '\n '))
console_printer.print() # Add a newline
if show_params:
show_enumeration(
console_printer, 'Supported languages:',
bear.LANGUAGES,
' ',
'The bear does not provide information about which languages '
'it can analyze.')
show_enumeration(console_printer,
'Needed Settings:',
metadata.non_optional_params,
' ',
'No needed settings.')
show_enumeration(console_printer,
'Optional Settings:',
metadata.optional_params,
' ',
'No optional settings.')
show_enumeration(console_printer,
'Can detect:',
bear.can_detect,
' ',
'This bear does not provide information about what '
'categories it can detect.')
show_enumeration(console_printer,
'Can fix:',
bear.CAN_FIX,
' ',
'This bear cannot fix issues or does not provide '
'information about what categories it can fix.')
console_printer.print(
' Path:\n' + ' ' + repr(bear.source_location) + '\n')
def print_bears(bears,
show_description,
show_params,
console_printer,
args=None):
"""
Presents all bears being used in a stylized manner.
:param bears: It's a dictionary with bears as keys and list of
sections containing those bears as values.
:param show_description: True if the main description of the bears should
be shown.
:param show_params: True if the parameters and their description
should be shown.
:param console_printer: Object to print messages on the console.
:param args: Args passed to coala command.
"""
if not bears:
console_printer.print('No bears to show. Did you forget to install '
'the `coala-bears` package? Try `pip3 install '
'coala-bears`.')
return
results = [bear for bear, _ in sorted(bears.items(),
key=lambda bear_tuple:
bear_tuple[0].name.lower())]
if args and args.json:
from coalib.output.JSONEncoder import create_json_encoder
JSONEncoder = create_json_encoder(use_relpath=args.relpath)
json_output = {'bears': results}
import json
json_formatted_output = json.dumps(json_output,
cls=JSONEncoder,
sort_keys=True,
indent=2,
separators=(',', ': '))
if args.output:
filename = args.output[0]
with open(filename, 'w') as fp:
fp.write(json_formatted_output)
else:
print(json_formatted_output)
elif args and args.format:
print_bears_formatted(results)
else:
for bear in results:
show_bear(bear,
show_description,
show_params,
console_printer)
def show_bears(local_bears,
global_bears,
show_description,
show_params,
console_printer,
args=None):
"""
Extracts all the bears from each enabled section or the sections in the
targets and passes a dictionary to the show_bears_callback method.
:param local_bears: Dictionary of local bears with section names
as keys and bear list as values.
:param global_bears: Dictionary of global bears with section
names as keys and bear list as values.
:param show_description: True if the main description of the bears should
be shown.
:param show_params: True if the parameters and their description
should be shown.
:param console_printer: Object to print messages on the console.
:param args: Args passed to coala command.
"""
bears = inverse_dicts(local_bears, global_bears)
print_bears(bears, show_description, show_params, console_printer, args)
def show_language_bears_capabilities(language_bears_capabilities,
console_printer):
"""
Displays what the bears can detect and fix.
:param language_bears_capabilities:
Dictionary with languages as keys and their bears' capabilities as
values. The capabilities are stored in a tuple of two elements where the
first one represents what the bears can detect, and the second one what
they can fix.
:param console_printer:
Object to print messages on the console.
"""
if not language_bears_capabilities:
console_printer.print('There is no bear available for this language')
else:
for language, capabilities in language_bears_capabilities.items():
if capabilities[0]:
console_printer.print(
'coala can do the following for ', end='')
console_printer.print(language.upper(), color='blue')
console_printer.print(' Can detect only: ', end='')
console_printer.print(
', '.join(sorted(capabilities[0])), color=CAPABILITY_COLOR)
if capabilities[1]:
console_printer.print(' Can fix : ', end='')
console_printer.print(
', '.join(sorted(capabilities[1])),
color=CAPABILITY_COLOR)
else:
console_printer.print('coala does not support ', color='red',
end='')
console_printer.print(language, color='blue')
| nemaniarjun/coala | coalib/output/ConsoleInteraction.py | Python | agpl-3.0 | 42,987 |
from django.contrib import admin
from concerns.models import Concern
class ConcernAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'reporter', 'status', 'resolved', 'created')
admin.site.register(Concern, ConcernAdmin)
| chop-dbhi/django-concerns | concerns/admin.py | Python | bsd-2-clause | 233 |
#######################################
# Code coded by Mike Doty
#
# If you want trackball checking, you will
# have to code it yourself. Sorry!
#
# Oh, and it just grabs the first joystick.
# Yes, that makes me lazy.
#
# Released February 8, 2008.
#######################################
import pygame
from pygame.locals import *
class App:
def __init__(self):
pygame.init()
pygame.display.set_caption("Joystick Analyzer")
# Set up the joystick
pygame.joystick.init()
self.my_joystick = None
self.joystick_names = []
# Enumerate joysticks
for i in range(0, pygame.joystick.get_count()):
self.joystick_names.append(pygame.joystick.Joystick(i).get_name())
print self.joystick_names
# By default, load the first available joystick.
if (len(self.joystick_names) > 0):
self.my_joystick = pygame.joystick.Joystick(0)
self.my_joystick.init()
max_joy = max(self.my_joystick.get_numaxes(),
self.my_joystick.get_numbuttons(),
self.my_joystick.get_numhats())
self.screen = pygame.display.set_mode( (max_joy * 30 + 10, 170) )
self.font = pygame.font.SysFont("Courier", 20)
# A couple of joystick functions...
def check_axis(self, p_axis):
if (self.my_joystick):
if (p_axis < self.my_joystick.get_numaxes()):
return self.my_joystick.get_axis(p_axis)
return 0
def check_button(self, p_button):
if (self.my_joystick):
if (p_button < self.my_joystick.get_numbuttons()):
return self.my_joystick.get_button(p_button)
return False
def check_hat(self, p_hat):
if (self.my_joystick):
if (p_hat < self.my_joystick.get_numhats()):
return self.my_joystick.get_hat(p_hat)
return (0, 0)
def draw_text(self, text, x, y, color, align_right=False):
surface = self.font.render(text, True, color, (0, 0, 0))
surface.set_colorkey( (0, 0, 0) )
self.screen.blit(surface, (x, y))
def center_text(self, text, x, y, color):
surface = self.font.render(text, True, color, (0, 0, 0))
surface.set_colorkey( (0, 0, 0) )
self.screen.blit(surface, (x - surface.get_width() / 2,
y - surface.get_height() / 2))
def main(self):
while (True):
self.g_keys = pygame.event.get()
self.screen.fill(0)
for event in self.g_keys:
if (event.type == KEYDOWN and event.key == K_ESCAPE):
self.quit()
return
elif (event.type == QUIT):
self.quit()
return
self.draw_text("Joystick Name: %s" % self.joystick_names[0],
5, 5, (0, 255, 0))
self.draw_text("Axes (%d)" % self.my_joystick.get_numaxes(),
5, 25, (255, 255, 255))
for i in range(0, self.my_joystick.get_numaxes()):
if (self.my_joystick.get_axis(i)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 50), 10, 0)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 50), 10, 0)
self.center_text("%d" % i, 20 + (i * 30), 50, (255, 255, 255))
self.draw_text("Buttons (%d)" % self.my_joystick.get_numbuttons(),
5, 75, (255, 255, 255))
for i in range(0, self.my_joystick.get_numbuttons()):
if (self.my_joystick.get_button(i)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 100), 10, 0)
print i
print (self.my_joystick.get_button(i) == True)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 100), 10, 0)
self.center_text("%d" % i, 20 + (i * 30), 100, (255, 255, 255))
self.draw_text("POV Hats (%d)" % self.my_joystick.get_numhats(),
5, 125, (255, 255, 255))
for i in range(0, self.my_joystick.get_numhats()):
if (self.my_joystick.get_hat(i) != (0, 0)):
pygame.draw.circle(self.screen, (0, 0, 200),
(20 + (i * 30), 150), 10, 0)
else:
pygame.draw.circle(self.screen, (255, 0, 0),
(20 + (i * 30), 150), 10, 0)
            self.center_text("%d" % i, 20 + (i * 30), 150, (255, 255, 255))
pygame.display.flip()
def quit(self):
pygame.display.quit()
app = App()
app.main() | carlfsmith/LunaBot | 2013/Control/joystick_example.py | Python | apache-2.0 | 5,043 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
# compatibility between Python 2 and 3.
"""
Django settings for sigsis project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path_pc = '//home/amilcar/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b2$3k_5pcq6#ed&ax+v9iz%k)^q-%-)y!z#jj^45e23*tt+f^5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']  # hostnames only, not full URLs
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.webdesign',
'tinymce',
'sorl.thumbnail',
'mce_filebrowser',
'core',
    'widget_tweaks',  # library used to customize simulator form elements in the template
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sigsis.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [path_pc + 'sigsis/core/html/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sigsis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sigsis',
'HOST': '127.0.0.1',
'PORT': '3306',
'USER': 'admin',
'PASSWORD': 'admin',
'TIME_ZONE': 'America/Sao_Paulo'
}}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
USE_I18N = True
USE_L10N = True
# USE_TZ = True  # enabling this causes errors in the DB!!!
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = path_pc + 'sigsis/core/media/'
MEDIA_URL = '/sigsis/media/'
STATIC_URL = '/sigsis/static/'
STATIC_ROOT = path_pc + 'sigsis/core/static/'
# Additional locations of static files
STATICFILES_DIRS = (path_pc + 'sigsis/core/media',)  # the trailing comma is required (single-element tuple)
TINYMCE_JS_URL = os.path.join(STATIC_URL, 'tiny_mce/tiny_mce.js' )
TINYMCE_JS_ROOT = STATIC_ROOT + 'tiny_mce/'
# TINYMCE_FILEBROWSER = True  # with this enabled, the image-insert file search does not appear in the admin
TINYMCE_SPELLCHECKER = True
# TINYMCE_COMPRESSOR = True  # does not work with this parameter enabled
TINYMCE_DEFAULT_CONFIG = {
'plugins' : "pagebreak, spellchecker, style, layer, table, lists, save, advhr, advimage, advlink, autolink, emotions, iespell, inlinepopups, insertdatetime, preview, media, searchreplace, print, contextmenu, paste, directionality, fullscreen, noneditable, visualchars, nonbreaking, xhtmlxtras, template, wordcount, advlist, autosave, media, visualblocks,",
'theme': "advanced",
# Theme options
'theme_advanced_buttons1' : "bold,italic,underline,strikethrough,sub,sup,|,removeformat,visualchars,|,justifyleft,justifycenter,justifyright,justifyfull,|,formatselect,fontselect,fontsizeselect,|,forecolor,backcolor,|,bullist,numlist,|,outdent,indent,blockquote,|,undo,redo,|,link,unlink,|,code,|,fullscreen,|",
'theme_advanced_buttons2' : "tablecontrols,|,hr,|,charmap,image,media,insertfile,anchor,|,search,replace,|",
'theme_advanced_toolbar_location' : "top",
'theme_advanced_toolbar_align' : "left",
'theme_advanced_statusbar_location' : "bottom",
'theme_advanced_resizing' : 'true',
'file_browser_callback': 'mce_filebrowser',
'width': '100%',
'height': '200',
'cleanup_on_startup': True,
'custom_uno_redo_levels': 10,
'nowrap': False,
#'relative_urls': False,
#'paste_text_sticky': True,
#'paste_text_sticky_default' : True,
#'style_formats' : "[{title : 'Bold text', inline : 'strong'},{title : 'Red text', inline : 'span', styles : {color : '#ff0000'}},{title : 'Help', inline : 'strong', classes : 'help'},{title : 'Table styles'},{title : 'Table row 1', selector : 'tr', classes : 'tablerow'}]",
}
# Session
SESSION_COOKIE_AGE = 43200  # 12 hours * 60 minutes * 60 seconds
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SAVE_EVERY_REQUEST = True
| amilcarsimm/sigsis-tcc | sigsis/settings.py | Python | gpl-3.0 | 5,488 |
from aldryn_client import forms
class Form(forms.BaseForm):
def to_settings(self, data, settings):
return settings
| aldryn/aldryn-snake | aldryn_config.py | Python | bsd-2-clause | 129 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Server side functions for tagging.
- Tags can be added to any record (doctype, name) in the system.
- Items are filtered by tags
- Top tags are shown in the sidebar (?)
- Tags are also identified by the tag_fields property of the DocType
Discussion:
Tags are shown in the docbrowser and ideally where-ever items are searched.
There should also be statistics available for tags (like top tags etc)
Design:
- free tags (user_tags) are stored in __user_tags
- doctype tags are set in tag_fields property of the doctype
- top tags merges the tags from both the lists (only refreshes once an hour (max))
"""
import frappe
def check_user_tags(dt):
"if the user does not have a tags column, then it creates one"
try:
frappe.db.sql("select `_user_tags` from `tab%s` limit 1" % dt)
except Exception, e:
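		# MySQL error 1054 ("unknown column") means `_user_tags` is missing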
if e.args[0] == 1054:
DocTags(dt).setup()
@frappe.whitelist()
def add_tag():
"adds a new tag to a record, and creates the Tag master"
f = frappe.local.form_dict
tag, color = f.get('tag'), f.get('color')
dt, dn = f.get('dt'), f.get('dn')
DocTags(dt).add(dn, tag)
return tag
@frappe.whitelist()
def remove_tag():
"removes tag from the record"
f = frappe.local.form_dict
tag, dt, dn = f.get('tag'), f.get('dt'), f.get('dn')
DocTags(dt).remove(dn, tag)
class DocTags:
"""Tags for a particular doctype"""
def __init__(self, dt):
self.dt = dt
def get_tag_fields(self):
"""returns tag_fields property"""
return frappe.db.get_value('DocType', self.dt, 'tag_fields')
def get_tags(self, dn):
"""returns tag for a particular item"""
return (frappe.db.get_value(self.dt, dn, '_user_tags', ignore=1) or '').strip()
def add(self, dn, tag):
"""add a new user tag"""
tl = self.get_tags(dn).split(',')
if not tag in tl:
tl.append(tag)
self.update(dn, tl)
def remove(self, dn, tag):
"""remove a user tag"""
tl = self.get_tags(dn).split(',')
self.update(dn, filter(lambda x:x!=tag, tl))
def remove_all(self, dn):
"""remove all user tags (call before delete)"""
self.update(dn, [])
def update(self, dn, tl):
"""updates the _user_tag column in the table"""
if not tl:
tags = ''
else:
tl = list(set(filter(lambda x: x, tl)))
tags = ',' + ','.join(tl)
try:
frappe.db.sql("update `tab%s` set _user_tags=%s where name=%s" % \
(self.dt,'%s','%s'), (tags , dn))
except Exception, e:
if e.args[0]==1054:
if not tags:
# no tags, nothing to do
return
self.setup()
self.update(dn, tl)
else: raise
def setup(self):
"""adds the _user_tags column if not exists"""
from frappe.model.db_schema import add_column
add_column(self.dt, "_user_tags", "Data")
| geo-poland/frappe | frappe/widgets/tags.py | Python | mit | 2,808 |
# coding: utf-8
# # Multiclass Support Vector Machine exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# In this exercise you will:
#
# - implement a fully-vectorized **loss function** for the SVM
# - implement the fully-vectorized expression for its **analytic gradient**
# - **check your implementation** using numerical gradient
# - use a validation set to **tune the learning rate and regularization** strength
# - **optimize** the loss function with **SGD**
# - **visualize** the final learned weights
#
# In[1]:
# Run some setup code for this notebook.
from __future__ import print_function
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
# get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# get_ipython().magic('load_ext autoreload')
# get_ipython().magic('autoreload 2')
# ## CIFAR-10 Data Loading and Preprocessing
# In[2]:
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# In[3]:
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# In[4]:
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# In[5]:
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# In[6]:
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# In[7]:
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# In[8]:
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
# ## SVM Classifier
#
# Your code for this section will all be written inside **cs231n/classifiers/linear_svm.py**.
#
# As you can see, we have prefilled the function `compute_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
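#
# For reference (a summary of the standard formulation, not the graded code):
# for each example i with scores s = x_i.dot(W), the naive function accumulates
# L_i = sum over j != y_i of max(0, s_j - s_{y_i} + 1), then averages over the
# training set and adds the regularization term reg * np.sum(W * W).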
# In[9]:
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
# The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.
#
# To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
# In[16]:
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# ### Inline Question 1:
# It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable*
#
# **Your Answer:** *fill this in.*
# In[39]:
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# In[19]:
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
# ### Stochastic Gradient Descent
#
# We now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
# In[ ]:
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# In[ ]:
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# In[ ]:
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# In[ ]:
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [2.5e4, 5e4]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
pass
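# A minimal sketch of one possible grid search (an illustrative solution,
# not the official one): train a LinearSVM for every (lr, reg) pair, score
# it on the training and validation splits, and keep the best model.
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        svm.train(X_train, y_train, learning_rate=lr, reg=reg,
                  num_iters=1500, verbose=False)
        train_accuracy = np.mean(y_train == svm.predict(X_train))
        val_accuracy = np.mean(y_val == svm.predict(X_val))
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = svm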
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# In[ ]:
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# In[ ]:
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# In[ ]:
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
# ### Inline question 2:
# Describe what your visualized SVM weights look like, and offer a brief explanation for why they look they way that they do.
#
# **Your answer:** *fill this in*
| DavidQiuChao/CS231nHomeWorks | assignment1/svm.py | Python | mit | 14,717 |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import jinja2
import os
import webapp2
import logging
from google.appengine.api import memcache
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainPage(webapp2.RequestHandler):
def get(self):
greetings = memcache.get('entries')
if greetings is None:
greetings = []
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(entries=greetings))
def post(self):
greeting = self.request.get('entry')
greetings = memcache.get('entries')
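        # Read-modify-write: replace() only succeeds when the key already
        # exists, while set() creates it. Note the get()/replace() pair is
        # not atomic, so concurrent posts may overwrite each other's entries.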
if greetings is not None:
greetings.append(greeting)
if not memcache.replace('entries', greetings):
logging.error('Memcache replace failed.')
else:
greetings = [greeting]
if not memcache.set('entries', greetings):
logging.error('Memcache set failed.')
self.redirect('/')
class Clear(webapp2.RequestHandler):
def post(self):
if not memcache.delete('entries'):
logging.error("Memcache failed to delete entries")
self.redirect('/')
application = webapp2.WSGIApplication([
('/', MainPage),
('/clear', Clear)
], debug=True)
| GoogleCloudPlatformTraining/cp100-appengine-memcache-python | guestbook.py | Python | apache-2.0 | 1,998 |
import locale
_localecharset = locale.getpreferredencoding()
def encode(string):
return string.encode(_localecharset, "replace")
def decode(_bytes):
return _bytes.decode(_localecharset, "replace")
| joerg-lehmann/PyTone | src/encoding.py | Python | gpl-2.0 | 208 |
## This is a dummy router for now
##
## In the future, it will route read replica calls to the right place
import logging
log=logging.getLogger(__name__)
class MITxRouter(object):
"""
    A router that sends reads on edX platform models (student, courseware,
    contenttypes) to the 'remote' database and analytics models to 'default'.
"""
def db_for_read(self, model, **hints):
if model._meta.app_label in ['student','courseware', 'contenttypes']:
return 'remote'
elif model._meta.app_label in ['an_evt','edxmodules', 'prototypemodules', 'djmodules', 'cronjobs', 'celery', 'sessions', 'auth', 'django_cache', 'south', 'sites']:
return 'default'
else:
log.error("We need to explicitly route: {0}".format(model._meta.app_label))
return 'error'
def db_for_write(self, model, **hints):
"""
Attempts to write auth models go to auth_db.
"""
#if model._meta.app_label == 'auth':
# return 'auth_db'
print "db_for_write", model
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the auth app is involved.
"""
#if obj1._meta.app_label == 'auth' or \
# obj2._meta.app_label == 'auth':
# return True
print "allow_relation", obj1, obj2
return None
def allow_syncdb(self, db, model):
"""
Make sure the auth app only appears in the 'auth_db'
database.
"""
print "allow_syncdb", db, model
#if db == 'auth_db':
# return model._meta.app_label == 'auth'
#elif model._meta.app_label == 'auth':
# return False
return None
| edx/edxanalytics | src/edxanalytics/edxanalytics/mitxrouter.py | Python | agpl-3.0 | 1,694 |
# distinfo.py - provide meta information for distro repositories
#
# Copyright (c) 2005 Gustavo Noronha Silva <[email protected]>
# Copyright (c) 2006-2007 Sebastian Heinlein <[email protected]>
#
# Authors: Gustavo Noronha Silva <[email protected]>
# Sebastian Heinlein <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from __future__ import print_function
import errno
import logging
import os
from subprocess import Popen, PIPE
import re
import apt_pkg
from apt_pkg import gettext as _
class Template(object):
def __init__(self):
self.name = None
self.child = False
self.parents = [] # ref to parent template(s)
self.match_name = None
self.description = None
self.base_uri = None
self.type = None
self.components = []
self.children = []
self.match_uri = None
self.mirror_set = {}
self.distribution = None
self.available = True
self.official = True
def has_component(self, comp):
''' Check if the distribution provides the given component '''
return comp in (c.name for c in self.components)
def is_mirror(self, url):
''' Check if a given url of a repository is a valid mirror '''
proto, hostname, dir = split_url(url)
if hostname in self.mirror_set:
return self.mirror_set[hostname].has_repository(proto, dir)
else:
return False
class Component(object):
def __init__(self, name, desc=None, long_desc=None, parent_component=None):
self.name = name
self.description = desc
self.description_long = long_desc
self.parent_component = parent_component
def get_parent_component(self):
return self.parent_component
def set_parent_component(self, parent):
self.parent_component = parent
def get_description(self):
if self.description_long is not None:
return self.description_long
elif self.description is not None:
return self.description
else:
return None
def set_description(self, desc):
self.description = desc
def set_description_long(self, desc):
self.description_long = desc
def get_description_long(self):
return self.description_long
class Mirror(object):
''' Storage for mirror related information '''
def __init__(self, proto, hostname, dir, location=None):
self.hostname = hostname
self.repositories = []
self.add_repository(proto, dir)
self.location = location
def add_repository(self, proto, dir):
self.repositories.append(Repository(proto, dir))
def get_repositories_for_proto(self, proto):
return [r for r in self.repositories if r.proto == proto]
def has_repository(self, proto, dir):
if dir is None:
return False
for r in self.repositories:
if r.proto == proto and dir in r.dir:
return True
return False
def get_repo_urls(self):
return [r.get_url(self.hostname) for r in self.repositories]
def get_location(self):
return self.location
def set_location(self, location):
self.location = location
class Repository(object):
def __init__(self, proto, dir):
self.proto = proto
self.dir = dir
def get_info(self):
return self.proto, self.dir
def get_url(self, hostname):
return "%s://%s/%s" % (self.proto, hostname, self.dir)
def split_url(url):
    ''' split a given URL into the protocol, the hostname and the dir part '''
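    # e.g. "http://archive.ubuntu.com/ubuntu" -> ['http', 'archive.ubuntu.com', 'ubuntu']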
    split = re.split(r":*\/+", url, maxsplit=2)
while len(split) < 3:
split.append(None)
return split
class DistInfo(object):
def __init__(self,
dist = None,
base_dir = "/usr/share/python-apt/templates"):
self.metarelease_uri = ''
self.templates = []
self.arch = apt_pkg.config.find("APT::Architecture")
location = None
match_loc = re.compile(r"^#LOC:(.+)$")
match_mirror_line = re.compile(
r"^(#LOC:.+)|(((http)|(ftp)|(rsync)|(file)|(mirror)|(https))://"
r"[A-Za-z0-9/\.:\-_@]+)$")
#match_mirror_line = re.compile(r".+")
if not dist:
try:
dist = Popen(["lsb_release", "-i", "-s"],
stdout=PIPE).communicate()[0].strip()
except OSError as exc:
if exc.errno != errno.ENOENT:
                logging.warning('lsb_release failed, using defaults: %s' % exc)
dist = "Debian"
self.dist = dist
map_mirror_sets = {}
dist_fname = "%s/%s.info" % (base_dir, dist)
with open(dist_fname) as dist_file:
template = None
component = None
for line in dist_file:
tokens = line.split(':', 1)
if len(tokens) < 2:
continue
field = tokens[0].strip()
value = tokens[1].strip()
if field == 'ChangelogURI':
self.changelogs_uri = _(value)
elif field == 'MetaReleaseURI':
self.metarelease_uri = value
elif field == 'Suite':
self.finish_template(template, component)
component=None
template = Template()
template.name = value
template.distribution = dist
template.match_name = "^%s$" % value
elif field == 'MatchName':
template.match_name = value
elif field == 'ParentSuite':
template.child = True
for nanny in self.templates:
# look for parent and add back ref to it
if nanny.name == value:
template.parents.append(nanny)
nanny.children.append(template)
elif field == 'Available':
template.available = apt_pkg.string_to_bool(value)
elif field == 'Official':
template.official = apt_pkg.string_to_bool(value)
elif field == 'RepositoryType':
template.type = value
elif field == 'BaseURI' and not template.base_uri:
template.base_uri = value
elif field == 'BaseURI-%s' % self.arch:
template.base_uri = value
elif field == 'MatchURI' and not template.match_uri:
template.match_uri = value
elif field == 'MatchURI-%s' % self.arch:
template.match_uri = value
elif (field == 'MirrorsFile' or
field == 'MirrorsFile-%s' % self.arch):
# Make the path absolute.
value = os.path.isabs(value) and value or \
os.path.abspath(os.path.join(base_dir, value))
if value not in map_mirror_sets:
mirror_set = {}
try:
with open(value) as value_f:
mirror_data = list(filter(
match_mirror_line.match,
[x.strip() for x in value_f]))
except Exception:
print("WARNING: Failed to read mirror file")
mirror_data = []
for line in mirror_data:
if line.startswith("#LOC:"):
location = match_loc.sub(r"\1", line)
continue
(proto, hostname, dir) = split_url(line)
if hostname in mirror_set:
mirror_set[hostname].add_repository(proto, dir)
else:
mirror_set[hostname] = Mirror(
proto, hostname, dir, location)
map_mirror_sets[value] = mirror_set
template.mirror_set = map_mirror_sets[value]
elif field == 'Description':
template.description = _(value)
elif field == 'Component':
if (component and not
template.has_component(component.name)):
template.components.append(component)
component = Component(value)
elif field == 'CompDescription':
component.set_description(_(value))
elif field == 'CompDescriptionLong':
component.set_description_long(_(value))
elif field == 'ParentComponent':
component.set_parent_component(value)
self.finish_template(template, component)
template=None
component=None
def finish_template(self, template, component):
" finish the current tempalte "
if not template:
return
# reuse some properties of the parent template
if template.match_uri is None and template.child:
for t in template.parents:
if t.match_uri:
template.match_uri = t.match_uri
break
if template.mirror_set == {} and template.child:
for t in template.parents:
if t.match_uri:
template.mirror_set = t.mirror_set
break
if component and not template.has_component(component.name):
template.components.append(component)
component = None
# the official attribute is inherited
for t in template.parents:
template.official = t.official
self.templates.append(template)
if __name__ == "__main__":
d = DistInfo("Ubuntu", "/usr/share/python-apt/templates")
logging.info(d.changelogs_uri)
for template in d.templates:
logging.info("\nSuite: %s" % template.name)
logging.info("Desc: %s" % template.description)
logging.info("BaseURI: %s" % template.base_uri)
logging.info("MatchURI: %s" % template.match_uri)
if template.mirror_set != {}:
logging.info("Mirrors: %s" % list(template.mirror_set.keys()))
for comp in template.components:
logging.info(" %s -%s -%s" % (comp.name,
comp.description,
comp.description_long))
for child in template.children:
logging.info(" %s" % child.description)
| 2ndy/RaspIM | usr/share/pyshared/aptsources/distinfo.py | Python | gpl-2.0 | 11,567 |
#!/usr/bin/env python
# mainly for sys.argv[], sys.argv[0] is the name of the program
import sys
def sqroot(anum):
"""
Compute square root of a number via Newton's method
Newton's method is to approximate z = Sqrt(x) by picking a starting point z and then repeating:
z = z - (z*z - x)/(2*z)
"""
try:
initguess = anum / 2.
epsilon = 0.000001
diff = initguess
newres = 0
while diff > epsilon:
newres = initguess - (initguess**2 - anum) / (2. * initguess)
diff = abs(newres - initguess)
initguess = newres
except ZeroDivisionError:
print 'oops, divided by zero!'
else:
return initguess, newres
if __name__ == '__main__':
init, new = sqroot(float(sys.argv[1]))
print new
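# Example run (illustrative):
#   $ python sqroot.py 2
#   1.41421356237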
| ketancmaheshwari/hello-goog | src/python/sqroot.py | Python | apache-2.0 | 805 |
"""
Sample that confirms dictionary insertion order is preserved in Python 3.7.
REFERENCES:: http://bit.ly/2VIggXP
http://bit.ly/2VySRIe
http://bit.ly/2VFhjI4
http://bit.ly/2VEq058
http://bit.ly/2VBKrzK
"""
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
"""コードのネタは http://bit.ly/2VBKrzK から拝借"""
languages = ['Python', 'Ruby', 'Perl', 'Python', 'JavaScript']
        # combine sorted + set + list.index to drop duplicates while keeping order (from http://bit.ly/2VBKrzK)
pr('sorted + set + list.index', sorted(set(languages), key=languages.index))
        # since Python 3.7, preserving dict insertion order is part of the language spec, so this works too
pr('dict (python 3.7)', list(dict.fromkeys(languages)))
        # note: set(), which gives no ordering guarantee, also removes duplicates but obviously loses the order
pr('set (python 3.7)', list(set(languages)))
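        # Expected contents (illustrative; exact formatting depends on pr()):
        # both order-preserving variants yield ['Python', 'Ruby', 'Perl', 'JavaScript'],
        # while the set() variant returns the same elements in arbitrary order.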
def go():
obj = Sample()
obj.exec()
| devlights/try-python | trypython/basic/dict_/dict_preserved_insert_order_py37.py | Python | mit | 1,231 |
# -*- coding: UTF-8 -*-
# Copyright (C) 2017 Alexandre Bonny <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from unittest import TestCase, main
# Import from itools
from itools.web.dispatcher import URIDispatcher
from itools.core import OrderedDict
class DispatcherTestCase(TestCase):
def setUp(self):
self.dispatcher = URIDispatcher()
def _check_matching_method(self, urls):
for url, method in urls:
method_selected, _ = self.dispatcher.resolve(url)
assert method_selected == method
def _register_routes(self, patterns):
# Clear patterns
self.dispatcher.patterns = OrderedDict()
# Register in dispatcher
for route, method in patterns:
self.dispatcher.add(route, method)
def test_pattern_matching(self):
patterns = [
('/one', 'ROUTE1'),
('/one/{name}', 'ROUTE11'),
('/one/{name}/two', 'ROUTE2'),
('/two/three', 'ROUTE3'),
('/two/three/{name}', 'ROUTE33'),
('/ui/{name:chunk}.{extension:chunk}', 'ROUTEICON'),
('/ui/skin/{name:any}', 'ROUTEALL'),
]
match_urls = [
('/one', 'ROUTE1'),
('/one/hello', 'ROUTE11'),
('/one/hello/two', 'ROUTE2'),
('/two/three', 'ROUTE3'),
('/two/three/test', 'ROUTE33'),
('/ui/favicon.ico', 'ROUTEICON'),
('/ui/skin/favicon.ico', 'ROUTEALL'),
('/ui/skin/images/image.png', 'ROUTEALL'),
]
bad_urls = [
'/one/',
'one',
'/one/hello/test',
'/hello/two',
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(match_urls)
# Check bad urls
for url in bad_urls:
assert self.dispatcher.resolve(url) is None
def test_patterns_params(self):
patterns = [
('/one/{param:digits}', 'DIGIT'),
('/one/{param:number}', 'NUMBER'),
('/one/{param:alpha}', 'ALPHA'),
('/one/{param:word}', 'WORD'),
('/one/{param:chunk}', 'CHUNK'),
('/one/{param:any}', 'ANY'),
]
urls = [
('/one/14', 'DIGIT'),
('/one/14.3', 'NUMBER'),
('/one/Hello', 'ALPHA'),
('/one/Hello1', 'WORD'),
('/one/Hello@1_8', 'CHUNK'),
('/one/Hello@1_8/any', 'ANY'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution with params
for url, method in urls:
method_selected, params = self.dispatcher.resolve(url)
assert method_selected == method
assert params.get('param') == url.replace('/one/', '')
def test_patterns_ordering(self):
patterns = [
('/one/{param}', 'FIRST'),
('/one/{param}/two', 'SECOND'),
('/one/{param:any}', 'THIRD'),
]
urls = [
('/one/test', 'FIRST'),
('/one/test/two', 'SECOND'),
('/one/test/two/three', 'THIRD'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(urls)
# Change ordering to verify catch all effect
patterns = [
('/one/{param:any}', 'FIRST'),
('/one/{param}', 'SECOND'),
('/one/{param}/two', 'THIRD'),
]
urls = [
('/one/test', 'FIRST'),
('/one/test/two', 'FIRST'),
('/one/test/two/three', 'FIRST'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(urls)
# Add special case taking advantage of ordering
patterns = [
('/one/14', 'SPECIAL'),
('/one/{param:digits}', 'DIGITS'),
]
urls = [
('/one/15', 'DIGITS'),
('/one/180', 'DIGITS'),
('/one/14', 'SPECIAL'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(urls)
# Other way around, special case never caught
patterns = [
('/one/{param:digits}', 'DIGITS'),
('/one/14', 'SPECIAL'),
]
urls = [
('/one/15', 'DIGITS'),
('/one/180', 'DIGITS'),
('/one/14', 'DIGITS'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(urls)
def test_patterns_override(self):
patterns = [
('/one/{param}', 'FIRST'),
('/one/{param}', 'SECOND'),
('/one/{param}', 'THIRD'),
]
# Last override method will be called
urls = [
('/one/two', 'THIRD'),
('/one/three', 'THIRD'),
]
# Register route patterns
self._register_routes(patterns)
# Check dispatcher route resolution
self._check_matching_method(urls)
if __name__ == '__main__':
main()
| bepatient-fr/itools | test/test_dispatcher.py | Python | gpl-3.0 | 6,186 |
# This file is part of sydpy.
#
# Copyright (C) 2014-2015 Bogdan Vukobratovic
#
# sydpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# sydpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with sydpy. If not, see
# <http://www.gnu.org/licenses/>.
from sydpy._util._injector import RequiredFeature
class SimtimeProgress(object):
configurator = RequiredFeature('Configurator')
def log_begin_timestep(self, time, sim):
if time > (self.old_time + self.step):
self.old_time = (time // self.step) * self.step
print("--{0}--".format(self.old_time))
return True
def __init__(self, sim_events):
self.step = self.configurator['SimtimeProgress', 'step', 100]
sim_events['timestep_start'].append(self.log_begin_timestep)
self.old_time = 0 | bogdanvuk/sydpy | sydpy/extens/simprog.py | Python | lgpl-2.1 | 1,325 |
from discord import Embed, Color
description = "Help"
CMDMAP = None
async def ex(message, client):
await client.send_message(message.author, embed=Embed(
color=Color.green(),
title="knechtBot - Help",
description="**[Here](https://docs.google.com/spreadsheets/d/e/2PACX-1vR_XkUXIhCNhnpqFu3N-grcDRWzLSTtZgtdRjdzMkhnS1n40_d9oITfEz1LzAKpDlLuTsmiiwUfbOh0/pubhtml)** you can see" \
"a full list of all user available commands of this bot! ;)"
).set_image(url="https://media.giphy.com/media/nY5qTLViCYjIc/giphy.gif"))
await client.delete_message(message) | zekroTJA/regiusBot | commands/cmd_help.py | Python | mit | 609 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from biffy import views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'biffy.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', include('floweditor.urls')),
url(r'^floweditor/', include('floweditor.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login', views.userlogin, name='userlogin'),
url(r'^accounts/logout', views.userlogout, name='userlogout')
)
| PeterCloudyWillmott/biffy | biffy/urls.py | Python | mit | 519 |
from django.test import TestCase
from django.core.exceptions import ValidationError
from ...models import Validator
class ValidatorModelTest(TestCase):
def test_creation(self):
validator = Validator(name="name", regex=r"[a-zA-Z]+")
validator.save()
def test_invalid(self):
validator = Validator(name="name", regex=r"(")
self.assertRaises(ValidationError, validator.full_clean)
def test_string(self):
validator = Validator(name="name", regex=r"[a-zA-Z]+")
str(validator)
| ConstellationApps/Forms | constellation_forms/tests/models/testValidator.py | Python | isc | 534 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
""" Huawei ML2 Mechanism driver parameters.
The following are connection parameters for the Huawei ML2 mechanism driver.
"""
HUAWEI_DRIVER_OPTS = [
cfg.StrOpt('username',
default='',
               help=_('Username for communications to Huawei Switch. '
                      'This is a required field.')),
cfg.StrOpt('password',
default='',
secret=True,
               help=_('Password for communications to Huawei Switch. '
                      'This is a required field.')),
cfg.StrOpt('hostaddr',
default='',
               help=_('IP address of Huawei Switch. '
                      'This is a required field.')),
cfg.StrOpt('portname',
default='',
               help=_('The name of the port connected to Huawei Switch. '
                      'This is a required field.'))
]
cfg.CONF.register_opts(HUAWEI_DRIVER_OPTS, "ml2_huawei")
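# A minimal sketch of the matching section in the ML2 configuration file
# (all values below are hypothetical):
#   [ml2_huawei]
#   username = admin
#   password = secret
#   hostaddr = 192.168.1.1
#   portname = port0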
| huaweiswitch/neutron | neutron/plugins/ml2/drivers/huawei/config.py | Python | apache-2.0 | 1,533 |
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{first_name}} {{last_name}} s.p.',
)
company_suffixes = (
'd.o.o.', 'd.d.',
)
| danhuss/faker | faker/providers/company/sl_SI/__init__.py | Python | mit | 247 |
"""Base Treehopper API for Python.
This module provides digital and analog I/O, hardware and software PWM, I2C, SPI, and UART support.
"""
## @namespace treehopper.api
from treehopper.api.interfaces import *
from treehopper.api.pin import Pin, PinMode, ReferenceLevel
from treehopper.api.device_commands import DeviceCommands
from treehopper.api.pwm import HardwarePwm, HardwarePwmFrequency
from treehopper.api.treehopper_usb import TreehopperUsb
from treehopper.api.find_boards import find_boards
from treehopper.api.i2c import I2C
from treehopper.api.spi import HardwareSpi
from treehopper.api.i2c import HardwareI2C
from treehopper.api.uart import HardwareUart
__all__ = ['find_boards', 'TreehopperUsb',
'Pin', 'PinMode', 'ReferenceLevel',
'HardwareSpi', 'Spi', 'ChipSelectMode', 'SpiMode', 'SpiBurstMode',
'HardwareI2C', 'I2C',
           'HardwareUart', 'Uart', 'OneWire',
'HardwarePwm', 'HardwarePwmFrequency',
]
| treehopper-electronics/treehopper-sdk | Python/treehopper/api/__init__.py | Python | mit | 976 |
# =============================================================================
# OWSLib. Copyright (C) 2005 Sean C. Gillies
#
# Contact email: [email protected]
#
# $Id: wfs.py 503 2006-02-01 17:09:12Z dokai $
# =============================================================================
#owslib imports:
from owslib.ows import ServiceIdentification, ServiceProvider, OperationsMetadata
from owslib.etree import etree
from owslib.util import nspath, testXMLValue
from owslib.crs import Crs
from owslib.feature import WebFeatureService_
from owslib.namespaces import Namespaces
from owslib.fgdc import Metadata
from owslib.iso import MD_Metadata
#other imports
import cgi
from cStringIO import StringIO
from urllib import urlencode
from urllib2 import urlopen
import logging
from owslib.util import log
n = Namespaces()
WFS_NAMESPACE = n.get_namespace("wfs20")
OWS_NAMESPACE = n.get_namespace("ows110")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
FES_NAMESPACE = n.get_namespace("fes")
class ServiceException(Exception):
pass
class WebFeatureService_2_0_0(WebFeatureService_):
"""Abstraction for OGC Web Feature Service (WFS).
Implements IWebFeatureService.
"""
def __new__(self,url, version, xml, parse_remote_metadata=False):
""" overridden __new__ method
@type url: string
@param url: url of WFS capabilities document
@type xml: string
@param xml: elementtree object
@type parse_remote_metadata: boolean
@param parse_remote_metadata: whether to fully process MetadataURL elements
@return: initialized WebFeatureService_2_0_0 object
"""
obj=object.__new__(self)
obj.__init__(url, version, xml, parse_remote_metadata)
return obj
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self, url, version, xml=None, parse_remote_metadata=False):
"""Initialize."""
if log.isEnabledFor(logging.DEBUG):
log.debug('building WFS %s'%url)
self.url = url
self.version = version
self._capabilities = None
reader = WFSCapabilitiesReader(self.version)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
self._buildMetadata(parse_remote_metadata)
def _buildMetadata(self, parse_remote_metadata=False):
'''set up capabilities metadata objects: '''
#serviceIdentification metadata
serviceidentelem=self._capabilities.find(nspath('ServiceIdentification'))
self.identification=ServiceIdentification(serviceidentelem)
#need to add to keywords list from featuretypelist information:
featuretypelistelem=self._capabilities.find(nspath('FeatureTypeList', ns=WFS_NAMESPACE))
featuretypeelems=featuretypelistelem.findall(nspath('FeatureType', ns=WFS_NAMESPACE))
for f in featuretypeelems:
kwds=f.findall(nspath('Keywords/Keyword',ns=OWS_NAMESPACE))
if kwds is not None:
for kwd in kwds[:]:
if kwd.text not in self.identification.keywords:
self.identification.keywords.append(kwd.text)
#TODO: update serviceProvider metadata, miss it out for now
serviceproviderelem=self._capabilities.find(nspath('ServiceProvider'))
self.provider=ServiceProvider(serviceproviderelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find(nspath('OperationsMetadata'))[:]:
if elem.tag !=nspath('ExtendedCapabilities'):
self.operations.append(OperationsMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
featuretypelist=self._capabilities.find(nspath('FeatureTypeList',ns=WFS_NAMESPACE))
features = self._capabilities.findall(nspath('FeatureTypeList/FeatureType', ns=WFS_NAMESPACE))
for feature in features:
cm=ContentMetadata(feature, featuretypelist, parse_remote_metadata)
self.contents[cm.id]=cm
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def getcapabilities(self, timeout=30):
"""Request and return capabilities document from the WFS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WFSCapabilitiesReader(self.version)
return urlopen(reader.capabilities_url(self.url), timeout=timeout)
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getfeature(self, typename=None, filter=None, bbox=None, featureid=None,
featureversion=None, propertyname=None, maxfeatures=None,storedQueryID=None, storedQueryParams={},
method='Get', timeout=30, outputFormat=None):
"""Request and return feature data as a file-like object.
#TODO: NOTE: have changed property name from ['*'] to None - check the use of this in WFS 2.0
Parameters
----------
typename : list
List of typenames (string)
filter : string
XML-encoded OGC filter expression.
bbox : tuple
(left, bottom, right, top) in the feature type's coordinates == (minx, miny, maxx, maxy)
featureid : list
List of unique feature ids (string)
featureversion : string
Default is most recent feature version.
propertyname : list
List of feature property names. '*' matches all.
maxfeatures : int
Maximum number of features to be returned.
method : string
Qualified name of the HTTP DCP method to use.
timeout : number
A timeout value (in seconds) for the request.
outputFormat: string (optional)
Requested response format of the request.
There are 3 different modes of use
1) typename and bbox (simple spatial query)
2) typename and filter (==query) (more expressive)
3) featureid (direct access to known features)
"""
url = data = None
        if typename and isinstance(typename, str):
typename = [typename]
if method.upper() == "GET":
(url) = self.getGETGetFeatureRequest(typename, filter, bbox, featureid,
featureversion, propertyname,
maxfeatures, storedQueryID,
storedQueryParams, outputFormat)
if log.isEnabledFor(logging.DEBUG):
log.debug('GetFeature WFS GET url %s'% url)
else:
(url,data) = self.getPOSTGetFeatureRequest()
# If method is 'Post', data will be None here
u = urlopen(url, data, timeout)
# check for service exceptions, rewrap, and return
# We're going to assume that anything with a content-length > 32k
# is data. We'll check anything smaller.
try:
length = int(u.info()['Content-Length'])
have_read = False
except KeyError:
data = u.read()
have_read = True
length = len(data)
if length < 32000:
if not have_read:
data = u.read()
try:
tree = etree.fromstring(data)
except BaseException:
# Not XML
return StringIO(data)
else:
if tree.tag == "{%s}ServiceExceptionReport" % OGC_NAMESPACE:
se = tree.find(nspath('ServiceException', OGC_NAMESPACE))
raise ServiceException(str(se.text).strip())
else:
return StringIO(data)
else:
if have_read:
return StringIO(data)
return u
def getpropertyvalue(self, query=None, storedquery_id=None, valuereference=None, typename=None, method=nspath('Get'),**kwargs):
''' the WFS GetPropertyValue method'''
base_url = self.getOperationByName('GetPropertyValue').methods[method]['url']
request = {'service': 'WFS', 'version': self.version, 'request': 'GetPropertyValue'}
if query:
request['query'] = str(query)
if valuereference:
request['valueReference'] = str(valuereference)
if storedquery_id:
request['storedQuery_id'] = str(storedquery_id)
if typename:
request['typename']=str(typename)
if kwargs:
for kw in kwargs.keys():
request[kw]=str(kwargs[kw])
encoded_request=urlencode(request)
u = urlopen(base_url + encoded_request)
return u.read()
def _getStoredQueries(self, timeout=30):
''' gets descriptions of the stored queries available on the server '''
sqs=[]
#This method makes two calls to the WFS - one ListStoredQueries, and one DescribeStoredQueries. The information is then
#aggregated in 'StoredQuery' objects
method=nspath('Get')
#first make the ListStoredQueries response and save the results in a dictionary if form {storedqueryid:(title, returnfeaturetype)}
base_url = self.getOperationByName('ListStoredQueries').methods[method]['url']
request = {'service': 'WFS', 'version': self.version, 'request': 'ListStoredQueries'}
encoded_request = urlencode(request)
u = urlopen(base_url + encoded_request, timeout=timeout)
tree=etree.fromstring(u.read())
base_url = self.getOperationByName('ListStoredQueries').methods[method]['url']
tempdict={}
for sqelem in tree[:]:
title=rft=id=None
id=sqelem.get('id')
for elem in sqelem[:]:
if elem.tag==nspath('Title', WFS_NAMESPACE):
title=elem.text
elif elem.tag==nspath('ReturnFeatureType', WFS_NAMESPACE):
rft=elem.text
tempdict[id]=(title,rft) #store in temporary dictionary
#then make the DescribeStoredQueries request and get the rest of the information about the stored queries
base_url = self.getOperationByName('DescribeStoredQueries').methods[method]['url']
request = {'service': 'WFS', 'version': self.version, 'request': 'DescribeStoredQueries'}
encoded_request = urlencode(request)
u = urlopen(base_url + encoded_request, timeout=timeout)
tree=etree.fromstring(u.read())
tempdict2={}
for sqelem in tree[:]:
params=[] #list to store parameters for the stored query description
id =sqelem.get('id')
for elem in sqelem[:]:
if elem.tag==nspath('Abstract', WFS_NAMESPACE):
abstract=elem.text
elif elem.tag==nspath('Parameter', WFS_NAMESPACE):
newparam=Parameter(elem.get('name'), elem.get('type'))
params.append(newparam)
tempdict2[id]=(abstract, params) #store in another temporary dictionary
#now group the results into StoredQuery objects:
        for key in tempdict.keys():
            sqs.append(StoredQuery(key, tempdict[key][0], tempdict[key][1], tempdict2[key][0], tempdict2[key][1]))
return sqs
storedqueries = property(_getStoredQueries, None)
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class StoredQuery(object):
'''' Class to describe a storedquery '''
def __init__(self, id, title, returntype, abstract, parameters):
self.id=id
self.title=title
self.returnfeaturetype=returntype
self.abstract=abstract
self.parameters=parameters
class Parameter(object):
def __init__(self, name, type):
self.name=name
self.type=type
class ContentMetadata:
"""Abstraction for WFS metadata.
Implements IMetadata.
"""
def __init__(self, elem, parent, parse_remote_metadata=False, timeout=30):
"""."""
self.id = elem.find(nspath('Name',ns=WFS_NAMESPACE)).text
self.title = elem.find(nspath('Title',ns=WFS_NAMESPACE)).text
abstract = elem.find(nspath('Abstract',ns=WFS_NAMESPACE))
if abstract is not None:
self.abstract = abstract.text
else:
self.abstract = None
self.keywords = [f.text for f in elem.findall(nspath('Keywords',ns=WFS_NAMESPACE))]
# bboxes
self.boundingBoxWGS84 = None
b = elem.find(nspath('WGS84BoundingBox',ns=OWS_NAMESPACE))
if b is not None:
lc = b.find(nspath("LowerCorner",ns=OWS_NAMESPACE))
uc = b.find(nspath("UpperCorner",ns=OWS_NAMESPACE))
ll = [float(s) for s in lc.text.split()]
ur = [float(s) for s in uc.text.split()]
self.boundingBoxWGS84 = (ll[0],ll[1],ur[0],ur[1])
        # there is no such thing as bounding box
# make copy of the WGS84BoundingBox
self.boundingBox = (self.boundingBoxWGS84[0],
self.boundingBoxWGS84[1],
self.boundingBoxWGS84[2],
self.boundingBoxWGS84[3],
Crs("epsg:4326"))
# crs options
self.crsOptions = [Crs(srs.text) for srs in elem.findall(nspath('OtherCRS',ns=WFS_NAMESPACE))]
defaultCrs = elem.findall(nspath('DefaultCRS',ns=WFS_NAMESPACE))
if len(defaultCrs) > 0:
self.crsOptions.insert(0,Crs(defaultCrs[0].text))
# verbs
self.verbOptions = [op.tag for op \
in parent.findall(nspath('Operations/*',ns=WFS_NAMESPACE))]
        self.verbOptions += [op.tag for op \
                in elem.findall(nspath('Operations/*',ns=WFS_NAMESPACE)) \
                if op.tag not in self.verbOptions]
#others not used but needed for iContentMetadata harmonisation
self.styles=None
self.timepositions=None
self.defaulttimeposition=None
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': m.find('Format').text.strip(),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
                    content = urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
try: # FGDC
metadataUrl['metadata'] = Metadata(doc)
except: # ISO
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception, err:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
class WFSCapabilitiesReader(object):
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='2.0.0'):
"""Initialize"""
self.version = version
self._infoset = None
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WFS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, url, timeout=30):
"""Get and parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
Parameters
----------
url : string
The URL to the WFS capabilities document.
timeout : number
A timeout value (in seconds) for the request.
"""
request = self.capabilities_url(url)
u = urlopen(request, timeout=timeout)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
| rbejar/odrl-ogc-cache-policies | owslib/feature/wfs200.py | Python | mit | 17,661 |
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for replays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import mpyq
import six
from pysc2.run_configs import lib as run_configs_lib
def get_replay_version(replay_data):
replay_io = six.BytesIO()
replay_io.write(replay_data)
replay_io.seek(0)
archive = mpyq.MPQArchive(replay_io).extract()
metadata = json.loads(archive[b"replay.gamemetadata.json"].decode("utf-8"))
return run_configs_lib.Version(
game_version=".".join(metadata["GameVersion"].split(".")[:-1]),
build_version=int(metadata["BaseBuild"][4:]),
data_version=metadata.get("DataVersion"), # Only in replays version 4.1+.
binary=None)
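# A minimal usage sketch (the replay path is hypothetical):
#   with open("example.SC2Replay", "rb") as f:
#     version = get_replay_version(f.read())
#   print(version.game_version, version.build_version)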
| deepmind/pysc2 | pysc2/lib/replay.py | Python | apache-2.0 | 1,345 |
import copy
from functools import partial
from collections import defaultdict
from django.db import models
from django.conf import settings
from django.db.models.options import DEFAULT_NAMES as ALL_META_OPTIONS
from utils import *
from storage import *
from constants import *
from history_model_methods import get_history_fields
from history_model_methods import get_history_methods
import fields
import manager
class ChangesTracker(object):
def connect(self, m, manager_name=None):
self.manager_name = manager_name
if m._meta.abstract:
# We can't do anything on the abstract model.
return
elif m._meta.proxy:
# Get the history model from the base class.
base = m
while base._meta.proxy:
base = base._meta.proxy_for_model
if hasattr(base, '_history_manager_name'):
history_model = get_versions(m).model
else:
history_model = self.create_history_model(m)
do_versioning = getattr(
settings, 'VERSIONUTILS_VERSIONING_ENABLED', True)
if not do_versioning:
# We still create the historical models but don't set any
# signals for saving/deleting.
return
setattr(m, '_track_changes', True)
# Over-ride the save, delete methods to allow arguments to be passed in
# such as comment="Made a small change."
setattr(m, 'save', save_func(m.save))
setattr(m, 'delete', delete_func(m.delete))
# We also attach signal handlers to the save, delete methods. It's
# easier to have two things going on (signal, and an overridden method)
# because the signal handlers tell us if the object is new or not.
# (Just checking object.pk is None is not enough!)
# Signal handlers are called when a bulk QuerySet.delete()
# is issued -- custom delete() and save() methods aren't called
# in that case.
_post_save = partial(self.post_save, m)
_pre_delete = partial(self.pre_delete, m)
_post_delete = partial(self.post_delete, m)
# The ChangesTracker object will be discarded, so the signal handlers
# can't use weak references.
models.signals.post_save.connect(_post_save, weak=False)
models.signals.pre_delete.connect(_pre_delete, weak=False)
models.signals.post_delete.connect(_post_delete, weak=False)
self.wrap_model_fields(m)
descriptor = manager.HistoryDescriptor(history_model)
setattr(m, self.manager_name, descriptor)
# Being able to look this up is intensely helpful.
setattr(m, '_history_manager_name', self.manager_name)
def ineligible_for_history_model(self, model):
"""
Certain abstract-y models can't have corresponding history models.
"""
return model._meta.proxy or model._meta.abstract
def create_history_model(self, model):
"""
Creates a historical model to associate with the model provided.
See http://code.djangoproject.com/wiki/DynamicModels
Django 'sees' this new historical model class and actually creates a
new model in the database (on syncdb) named <originalmodel>_hist.
This happens because the class that's returned is just like a normal
model definition ('class MyModel(models.Model)') in the eyes of the
Django machinery.
Returns:
Class representing the historical version of the model.
"""
        # For convenience's sake, it's nice to be able to reference the
# non-historical class from the historical class.
attrs = {
'_original_model': model,
# Though not strictly a field, this attribute
# is required for a model to function properly.
'__module__': model.__module__,
}
attrs.update(get_history_methods(self, model))
# Parents mean we are concretely subclassed.
if not model._meta.parents:
# Store _misc_members for later lookup.
misc_members = self.get_misc_members(model)
attrs.update(misc_members)
attrs.update({'_original_callables':
self.get_callables(model, skip=misc_members)})
attrs.update(Meta=type('Meta', (), self.get_meta_options(model)))
if not is_versioned(model.__base__):
attrs.update(get_history_fields(self, model))
attrs.update(self.get_extra_history_fields(model))
attrs.update(self.get_fields(model))
name = '%s_hist' % model._meta.object_name
# If we have a parent (meaning we're concretely subclassing)
# then let's have our historical object subclass the parent
# model's historical model, if the parent model is versioned.
# Concretely subclassed models keep some of their information in
# their parent model's table, and so if we subclass then we can
# mirror this DB relationship for our historical models.
# Migrations are easier this way -- you migrate historical
# models in the exact same fashion as non-historical models.
if model._meta.parents:
if is_versioned(model.__base__):
return type(
name, (get_versions(model.__base__).model,), attrs)
return type(name, (model.__base__,), attrs)
return type(name, (models.Model,), attrs)
def wrap_model_fields(self, model):
"""
Wrap some of the model's fields to add extra behavior.
"""
for field in model._meta.fields:
if isinstance(field, models.FileField):
field.storage = FileStorageWrapper(field.storage)
MEMBERS_TO_SKIP = [
'__dict__',
'__module__',
'__weakref__',
# TODO, maybe: if we wanted to be fancy we could define our
# own __doc__ that explains "this is like the normal model
# plus these historical methods."
'__doc__',
'_meta',
'_base_manager',
'_default_manager',
'save',
'delete'
]
FK_FIELDS_TO_COPY = [
'null',
'blank',
'db_index',
'db_tablespace',
'to_field'
]
M2M_FIELDS_TO_COPY = [
'null',
'blank',
'db_index',
'db_tablespace',
]
def get_misc_members(self, model):
# Would like to know a better way to do this.
# Ideally we would subclass the model and then extend it,
# but Django won't let us replace a field (in our case, a
# ForeignKey field change to point to a historical model).
# See also http://bit.ly/9k2Eqn
#
# The error we see when trying this is:
# FieldError: Local field 'person' in class 'ProfileChangeFK' clashes
# with field of similar name from base class 'Profile'
#
# But really -- even though the historical model does a good job
# of pretending to be the non-historical model -- it's still a
# different model. Faking might be bad. People shouldn't write code
# that depends on the model type. If this becomes problematic then
# we can swap out our call to type() in create_history_model()
# for a call to a metaclass with __instancecheck__ /
# __subclasscheck__ defined (a'la python 2.6)
d = copy.copy(dict(model.__dict__))
for k in ChangesTracker.MEMBERS_TO_SKIP:
if d.get(k, None) is not None:
del d[k]
# Modify fields and remove some fields we know we'll re-add in
# modified form later.
for k in copy.copy(d):
if isinstance(d[k], models.fields.Field):
del d[k]
continue
# This appears with model inheritance.
elif isinstance(getattr(d[k], 'field', None), models.fields.Field):
del d[k]
continue
elif isinstance(getattr(d[k], 'manager', None), models.Manager):
# Re-init managers.
d[k] = d[k].manager.__class__()
elif callable(d[k]):
# Skip callables - we deal with these separately.
del d[k]
continue
return d
def get_callables(self, model, skip=None):
if skip is None:
skip = {}
d = {}
attrs = dict(model.__dict__)
for k in attrs:
if (k in ChangesTracker.MEMBERS_TO_SKIP or k in skip):
continue
if callable(attrs[k]):
d[k] = attrs[k]
return d
def get_fields(self, model):
"""
Creates copies of the model's original fields.
Returns:
A dictionary mapping field name to copied field object.
"""
def _get_fk_opts(field):
opts = {}
for k in ChangesTracker.FK_FIELDS_TO_COPY:
if hasattr(field, k):
opts[k] = getattr(field, k, None)
return opts
def _get_m2m_opts(field):
# Always set symmetrical to False as there's no disadvantage
# to allowing reverse lookups.
opts = {'symmetrical': False}
for k in ChangesTracker.M2M_FIELDS_TO_COPY:
if hasattr(field, k):
opts[k] = getattr(field, k, None)
# XXX TODO deal with intermediate tables
if hasattr(field, 'through'):
pass
return opts
attrs = {}
for field in (model._meta.local_fields +
model._meta.local_many_to_many):
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
# The historical model gets its own AutoField, so any
# existing one must be replaced with an IntegerField.
field.__class__ = models.IntegerField
field.serialize = True
if (getattr(field, 'primary_key', None) or
getattr(field, 'unique', None)):
# Unique fields can no longer be guaranteed unique,
# but they should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True
if getattr(field, 'auto_now', None):
# Don't set auto_now=True historical models' fields.
field.auto_now = False
is_fk = isinstance(field, models.ForeignKey)
is_m2m = isinstance(field, models.ManyToManyField)
if isinstance(field, models.fields.related.RelatedField):
parent_model = field.rel.to
is_to_self = parent_model == model
# If the field is versioned then we replace it with a
# FK/M2M to the historical model.
if is_versioned(parent_model) or is_to_self:
if getattr(field.rel, 'parent_link', None):
# Concrete model inheritance and the parent is
# versioned. In this case, we subclass the
# parent historical model and use that parent
# related field instead.
continue
if is_fk:
model_type = models.ForeignKey
options = _get_fk_opts(field)
else:
model_type = models.ManyToManyField
options = _get_m2m_opts(field)
_m2m_changed = partial(self.m2m_changed, field.name)
models.signals.m2m_changed.connect(_m2m_changed,
sender=getattr(model, field.name).through,
weak=False,
)
if is_to_self:
# Fix related name conflict. We set this manually
# elsewhere so giving this a funky name isn't a
# problem.
options['related_name'] = ('%s_hist_set_2' %
field.related.var_name)
hist_field = model_type('self', **options)
else:
# Make the field into a foreignkey pointed at the
# history model.
fk_hist_obj = get_versions(parent_model).model
hist_field = model_type(fk_hist_obj, **options)
hist_field.name = field.name
if hasattr(field, 'attname'):
hist_field.attname = field.attname
field = hist_field
if not is_fk and not is_m2m:
# When this isn't an FK (which includes OneToOne) or M2M
# then this is something a bit more strange. In
# this case, let's make the related name end with
# '+' which causes the related descriptor to not be
# added. We'll also use our own unique name here to
# avoid collisions, which were seen with
# django-taggit (it uses a forced related_name on
# all models).
field.rel.related_name = '%s_hist_%s+' % (
model.__name__.lower(),
model._meta.app_label.lower()
)
attrs[field.name] = field
return attrs
def get_extra_history_fields(self, model):
"""
Extra, non-essential fields for the historical models.
        If you subclass ChangesTracker this is a good method to override:
        simply add your own values to the fields for custom fields.
NOTE: Your custom fields should start with history_ if you want them
to be looked up via hm.version_info.fieldname
Here's an example of extending to add a descriptive textfield::
def get_extra_history_fields(self, model):
# Keep base_attrs -- we like user tracking!
                attrs = super(MyTrackChanges, self).get_extra_history_fields(model)
attrs['history_long_description'] = models.TextField(
blank=True, null=True)
return attrs
Returns:
A dictionary of fields that will be added to the historical
record model.
"""
def _history_user_link(m):
if m.version_info.user:
user = m.version_info.user
return '<a href="%s">%s</a>' % (user.get_absolute_url(), user)
else:
if getattr(settings, 'SHOW_IP_ADDRESSES', True):
return m.version_info.user_ip
else:
return 'unknown'
attrs = {
'history_comment': models.CharField(max_length=200, blank=True,
null=True),
'history_user': fields.AutoUserField(null=True),
'history_user_ip': fields.AutoIPAddressField(null=True),
'history_user_link': _history_user_link,
'__unicode__': lambda self: u'%s as of %s' % (
self.history__object, self.history_date),
}
return attrs
META_TO_SKIP = [
'db_table', 'get_latest_by', 'managed', 'unique_together', 'ordering',
]
def get_meta_options(self, model):
"""
Returns:
A dictionary of fields that will be added to the Meta inner
class of the historical record model.
"""
meta = {'ordering': ('-history_date',)}
for k in ALL_META_OPTIONS:
if k in ChangesTracker.META_TO_SKIP:
continue
meta[k] = getattr(model._meta, k)
meta['verbose_name'] = meta['verbose_name'] + ' hist'
return meta
def post_save(self, parent, instance, created, **kws):
# To support subclassing.
if not isinstance(instance, parent):
return
parent_instance = get_parent_instance(instance, parent)
if parent_instance:
if (is_versioned(parent_instance) and
is_directly_versioned(instance)):
# The parent instance has its own handler that will fire
# in this case.
return
# Because this signal is attached to the parent
# class, let's save a historical record for the
# parent instance here.
instance = parent_instance
history_type = getattr(instance, '_history_type', None)
if not history_type == TYPE_REVERTED_CASCADE:
is_revert = history_type == TYPE_REVERTED
if created:
history_type = TYPE_REVERTED_ADDED if is_revert else TYPE_ADDED
else:
history_type = history_type or TYPE_UPDATED
hist_instance = self.create_historical_record(instance, history_type)
self.m2m_init(instance, hist_instance)
def pre_delete(self, parent, instance, **kws):
# To support subclassing.
if not isinstance(instance, parent):
return
parent_instance = get_parent_instance(instance, parent)
if parent_instance:
if is_versioned(parent_instance):
# The parent instance has its own handler that will fire
# in this case.
return
# Because this signal is attached to the parent
# class, let's save a historical record for the
# parent instance here.
instance = parent_instance
self._pk_recycle_cleanup(instance)
# If this model has a parent model (concrete model inheritance)
# and that model isn't versioned then we need to set
# track_changes=False. This is because the parent model is
# considered a 'part' of the child model in this case and this
# is the only way to ensure consistency.
for model in instance.__class__._meta.parents:
if not is_versioned(model):
instance._track_changes = False
def post_delete(self, parent, instance, **kws):
# To support subclassing.
if not isinstance(instance, parent):
return
parent_instance = get_parent_instance(instance, parent)
if parent_instance:
if is_versioned(parent_instance):
# The parent instance has its own handler that will fire
# in this case.
return
# Because this signal is attached to the parent
# class, let's save a historical record for the
# parent instance here.
instance = parent_instance
history_type = getattr(instance, '_history_type', None)
is_delete_cascade = getattr(instance, '_is_delete_cascade', None)
if history_type == TYPE_REVERTED:
if is_delete_cascade:
history_type = TYPE_REVERTED_DELETED_CASCADE
else:
history_type = TYPE_REVERTED_DELETED
else:
if is_delete_cascade:
history_type = TYPE_DELETED_CASCADE
else:
history_type = TYPE_DELETED
if not is_pk_recycle_a_problem(instance) and instance._track_changes:
hist_instance = self.create_historical_record(
instance, history_type)
self.m2m_init(instance, hist_instance)
# Disconnect the related objects signals
if hasattr(instance, '_rel_objs_methods'):
for model, method in instance._rel_objs_methods.iteritems():
models.signals.pre_delete.disconnect(method, model, weak=False)
def m2m_init(self, instance, hist_instance):
"""
Initialize the ManyToMany sets on a historical instance.
"""
for field in instance._meta.many_to_many:
parent_model = field.rel.to
if is_versioned(parent_model):
current_objs = getattr(instance, field.name).all()
m2m_items = []
for o in current_objs:
m2m_items.append(get_versions(o).most_recent())
setattr(hist_instance, field.name, m2m_items)
def m2m_changed(self, attname, sender, instance, action, reverse,
model, pk_set, **kwargs):
"""
A signal handler that syncs changes to m2m relations with
historical models.
Args:
attname: Attribute name of the m2m field on the base model.
"""
if pk_set:
changed_ms = [model.objects.get(pk=pk) for pk in pk_set]
hist_changed_ms = []
for m in changed_ms:
hist_changed_ms.append(get_versions(m).most_recent())
hist_instance = get_versions(instance).most_recent()
hist_through = getattr(hist_instance, attname)
if action == 'post_add':
for hist_m in hist_changed_ms:
hist_through.add(hist_m)
elif action == 'post_remove':
for hist_m in hist_changed_ms:
hist_through.remove(hist_m)
elif action == 'post_clear':
hist_through.clear()
def create_historical_record(self, instance, type):
manager = getattr(instance, self.manager_name)
# If they set track_changes to False
# then we don't auto-create a revision here.
if not instance._track_changes:
return
attrs = {}
for field in instance._meta.fields:
if isinstance(field, models.fields.related.ForeignKey):
is_fk_to_self = (field.related.parent_model ==
instance.__class__)
if is_versioned(field.related.parent_model) or is_fk_to_self:
if field.rel.parent_link:
# Concrete model inheritance and the parent is
# versioned. In this case, we subclass the
# parent historical model and use that parent
# related field instead.
continue
# If the FK field is versioned, set it to the most
# recent version of that object.
# The object the FK id refers to may have been
# deleted so we can't simply do Model.objects.get().
fk_hist_model = get_versions(field.rel.to).model
fk_id_name = field.rel.field_name
fk_id_val = getattr(instance, field.attname)
fk_objs = fk_hist_model.objects.filter(
**{fk_id_name: fk_id_val}
)
if fk_objs:
attrs[field.name] = fk_objs[0] # most recent version
else:
attrs[field.name] = None
continue
attrs[field.attname] = getattr(instance, field.attname)
attrs.update(self._get_save_with_attrs(instance))
return manager.create(history_type=type, **attrs)
def _get_save_with_attrs(self, instance):
"""
Prefix all keys with 'history_' to save them into the history
model correctly.
"""
d = {}
for k, v in getattr(instance, '_save_with', {}).iteritems():
d['history_%s' % k] = v
return d
def _pk_recycle_cleanup(self, instance):
"""
SQLite recycles autofield primary keys. Oops!
This will be fixed eventually:
Django Ticket #10164: http://code.djangoproject.com/ticket/10164
As a temporary consistency fix, we zero-out history of deleted objects
that lack unique fields to prevent history-mystery! The alternative
would be to throw an error on deletion of these objects.
"""
if not is_pk_recycle_a_problem(instance):
return
manager = getattr(instance, self.manager_name)
for entry in manager.all():
entry.delete()
def _related_objs_cascade_bookkeeping(m):
"""
m.delete() causes a cascaded delete to occur, wherein all related
objects are also deleted. This method passes along our custom
delete() parameters (like m.delete(comment="My comment")), sets
certain attributes correctly (like track_changes=False), and ensures
that the deleted objects' historical records note that they were
deleted via a cascade.
Args:
m: A model instance.
"""
def _do_bookkeeping_on(child_m, model, ids_to_catch,
instance, **kws):
if instance.pk in ids_to_catch:
instance._track_changes = child_m._track_changes
instance._save_with = child_m._save_with
instance._is_delete_cascade = True
m._rel_objs_to_catch = defaultdict(dict)
m._rel_objs_methods = {}
# Get the related objects that are versioned.
related_objects = m._meta.get_all_related_objects()
related_versioned = [o for o in related_objects if is_versioned(o.model)]
# Build a dictionary mapping model class -> pks of the related
# objects.
for rel_o in related_versioned:
accessor = rel_o.get_accessor_name()
if not hasattr(m, accessor):
continue
# OneToOneField means a single object.
if rel_o.field.related.field.__class__ == models.OneToOneField:
objs = [getattr(m, accessor)]
else:
objs = getattr(m, accessor).all()
for o in objs:
# We use a dictionary for fast lookup in
# _do_bookkeeping_on
m._rel_objs_to_catch[o.__class__][o.pk] = True
# Django will *not* call m.delete on children -- it does a
# bulk delete on children + does a post_delete and
# pre_delete on each child.
# We go recursive here because the way Django does delete
# cascades is, with models A --fk--> B --fk--> C, pre_delete
# on C, then pre_delete on B, then pre_delete on A. Django
# will *not* call m.delete() on children - it only calls
# pre_delete and post_delete along with a bulk database
# delete.
o._save_with = m._save_with
o._track_changes = m._track_changes
_related_objs_cascade_bookkeeping(o)
# For each relevant related object, we attach a method to the
# pre_delete signal that does our bookkeeping.
for model in m._rel_objs_to_catch:
ids_to_catch = m._rel_objs_to_catch[model]
# Construct a method that will check to see if the sender is
# one of the related objects.
_do_bookkeeping = partial(_do_bookkeeping_on, m, model,
ids_to_catch)
# We use signals here. One alternative would be to do the
        # cascade ourselves, but we can't be sure of cascade behavior.
models.signals.pre_delete.connect(_do_bookkeeping, sender=model,
weak=False)
# Save the method so we can disconnect it it after the delete.
m._rel_objs_methods[model] = _do_bookkeeping
def save_func(model_save):
def save(m, *args, **kws):
return save_with_arguments(model_save, m, *args, **kws)
return save
def save_with_arguments(model_save, m, force_insert=False, force_update=False,
using=None, track_changes=True, **kws):
"""
A simple custom save() method on models with changes tracked.
NOTE: All essential logic should go into our post/pre-delete/save
signal handlers, NOT in this method. Custom delete()
methods aren't called when doing bulk QuerySet.delete().
"""
m._track_changes = track_changes
if not hasattr(m, '_save_with'):
m._save_with = {}
m._save_with.update(kws)
return model_save(m, force_insert=force_insert,
force_update=force_update, using=using,
)
def delete_func(model_delete):
def delete(*args, **kws):
return delete_with_arguments(model_delete, *args, **kws)
return delete
def delete_with_arguments(model_delete, m, using=None,
track_changes=True, **kws):
"""
A simple custom delete() method on models with changes tracked.
NOTE: Most history logic should go into our post/pre-delete/save
signal handlers, NOT in this method. Custom delete()
methods aren't called when doing bulk QuerySet.delete().
"""
m._track_changes = track_changes
m._save_with = kws
if is_pk_recycle_a_problem(m):
m._track_changes = False
_related_objs_cascade_bookkeeping(m)
return model_delete(m, using=using)
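# A minimal usage sketch of the overridden save()/delete() methods (the Page
# model is hypothetical; assumes ChangesTracker has been connected to it):
#   page = Page(name='Front Page')
#   page.save(comment="Made a small change.")
#   page.delete(comment="No longer needed.")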
| mivanov/editkit | editkit/versionutils/versioning/models.py | Python | gpl-2.0 | 28,965 |
import numpy as np
class mymesh(np.ndarray):
def __new__(cls, init, val=0.0):
"""
Instantiates new datatype. This ensures that even when manipulating data, the result is still a mesh.
Args:
init: either another mesh or a tuple containing the dimensions, the communicator and the dtype
val: value to initialize
Returns:
obj of type mesh
"""
if isinstance(init, mymesh):
obj = np.ndarray.__new__(cls, init.shape, dtype=init.dtype, buffer=None)
obj[:] = init[:]
obj.part1 = obj[0, :]
obj.part2 = obj[1, :]
elif isinstance(init, tuple):
obj = np.ndarray.__new__(cls, shape=(2, init[0]), dtype=init[1], buffer=None)
obj.fill(val)
obj.part1 = obj[0, :]
obj.part2 = obj[1, :]
else:
raise NotImplementedError(type(init))
return obj
def __array_finalize__(self, obj):
"""
Finalizing the datatype. Without this, new datatypes do not 'inherit' the communicator.
"""
if obj is None:
return
self.part1 = getattr(obj, 'part1', None)
self.part2 = getattr(obj, 'part2', None)
m = mymesh((10, np.dtype('float64')))
m.part1[:] = 1
m.part2[:] = 2
print(m.part1, m.part2)
print(m)
print()
n = mymesh(m)
n.part1[0] = 10
print(n.part1, n.part2)
print(n)
print()
# NOTE: 'o' was undefined in the original script; construct it as another
# copy here so the demo runs (an assumption about the intended object).
o = mymesh(n)
print(o.part1, o.part2)
print(o)
print()
# print(n + m) | Parallel-in-Time/pySDC | pySDC/playgrounds/other/parallel_rhs_mesh.py | Python | bsd-2-clause | 1,490 |
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdeui.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KMultiTabBar(__PyQt4_QtGui.QWidget):
# no doc
def appendButton(self, *args, **kwargs): # real signature unknown
pass
def appendTab(self, *args, **kwargs): # real signature unknown
pass
def button(self, *args, **kwargs): # real signature unknown
pass
def fontChange(self, *args, **kwargs): # real signature unknown
pass
def isTabRaised(self, *args, **kwargs): # real signature unknown
pass
def position(self, *args, **kwargs): # real signature unknown
pass
def removeButton(self, *args, **kwargs): # real signature unknown
pass
def removeTab(self, *args, **kwargs): # real signature unknown
pass
def setPosition(self, *args, **kwargs): # real signature unknown
pass
def setStyle(self, *args, **kwargs): # real signature unknown
pass
def setTab(self, *args, **kwargs): # real signature unknown
pass
def tab(self, *args, **kwargs): # real signature unknown
pass
def tabStyle(self, *args, **kwargs): # real signature unknown
pass
def updateSeparator(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
Bottom = 3
KDEV3ICON = 2
KMultiTabBarPosition = None # (!) real value is ''
KMultiTabBarStyle = None # (!) real value is ''
Left = 0
Right = 1
STYLELAST = 65535
Top = 2
VSNET = 0
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdeui/KMultiTabBar.py | Python | gpl-2.0 | 1,780 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from sqlalchemy.orm import validates
from .mixins import deferred, BusinessObject, Timeboxed, CustomAttributable
from .object_document import Documentable
from .object_owner import Ownable
from .object_person import Personable
from .option import Option
from .relationship import Relatable
from .utils import validate_option
from .track_object_state import HasObjectState, track_state_for_class
class Product(HasObjectState, CustomAttributable, Documentable, Personable,
Relatable, Timeboxed, Ownable, BusinessObject, db.Model):
__tablename__ = 'products'
kind_id = deferred(db.Column(db.Integer), 'Product')
version = deferred(db.Column(db.String), 'Product')
kind = db.relationship(
'Option',
primaryjoin='and_(foreign(Product.kind_id) == Option.id, '\
'Option.role == "product_type")',
uselist=False,
)
_publish_attrs = [
'kind',
'version',
]
_sanitize_html = ['version',]
_aliases = {
"url": "Product URL",
"kind": {
"display_name": "Kind/Type",
"filter_by": "_filter_by_kind",
},
}
@validates('kind')
def validate_product_options(self, key, option):
return validate_option(
self.__class__.__name__, key, option, 'product_type')
@classmethod
def _filter_by_kind(cls, predicate):
return Option.query.filter(
(Option.id == cls.kind_id) & predicate(Option.title)
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Product, cls).eager_query()
return query.options(orm.joinedload('kind'))
track_state_for_class(Product)
| hyperNURb/ggrc-core | src/ggrc/models/product.py | Python | apache-2.0 | 1,890 |
import pyexcel as p
from nose.tools import eq_
def test_a_dictionary_of_sheet():
test_data = [["a", "b"]]
book_dict = {"test": p.Sheet(test_data)}
book = p.Book(book_dict)
eq_(book.test.array, test_data)
def test_book_len():
test_data = [["a", "b"]]
book_dict = {"test": p.Sheet(test_data)}
book = p.Book(book_dict)
eq_(len(book.test.array), 1)
def test_sheet_ordering():
test_data = [["a", "b"]]
book_dict = {"first": test_data, "middle": test_data, "last": test_data}
book = p.Book(book_dict)
eq_(book.sheet_names(), ["first", "middle", "last"])
| chfw/pyexcel | tests/test_book.py | Python | bsd-3-clause | 609 |
#import json
#import csv
class Series():
def __init__(self, games=None):
self.games = games or []
def from_json(self):
pass
def to_json(self):
pass
def from_csv(self):
pass
def to_csv(self):
pass
def avg_score(self):
scores = [g.score() for g in self.games]
return float(sum(scores)) / len(scores)
def avg_distribution(self):
pass
def distribution(self):
pass
| matthiaseisen/bowlingstats | bowling/series.py | Python | mit | 474 |
from __future__ import print_function
import os
from ants.utils.job import job_dir
from ants.utils.request import request_fingerprint
from ants.utils import log
class BaseDupeFilter(object):
@classmethod
def from_settings(cls, settings):
return cls()
def request_seen(self, request):
return False
def open(self): # can return deferred
pass
def close(self, reason): # can return a deferred
pass
def log(self, request, spider): # log that a request has been filtered
pass
class RFPDupeFilter(BaseDupeFilter):
"""Request Fingerprint duplicates filter"""
def __init__(self, path=None, debug=False):
self.file = None
self.fingerprints = set()
self.logdupes = True
self.debug = debug
if path:
self.file = open(os.path.join(path, 'requests.seen'), 'a+')
self.fingerprints.update(x.rstrip() for x in self.file)
@classmethod
def from_settings(cls, settings):
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(job_dir(settings), debug)
def request_seen(self, request):
fp = self.request_fingerprint(request)
if fp in self.fingerprints:
return True
self.fingerprints.add(fp)
if self.file:
self.file.write(fp + os.linesep)
def request_fingerprint(self, request):
return request_fingerprint(request)
def close(self, reason):
if self.file:
self.file.close()
def log(self, request, spider):
if self.debug:
log.spider_log("Filtered duplicate request:" + request.url, level=log.DEBUG, spider=spider)
elif self.logdupes:
fmt = ("Filtered duplicate request: " + request.url +
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
log.spider_log(fmt, level=log.DEBUG, spider=spider)
self.logdupes = False
spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
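# A minimal usage sketch (the request object is hypothetical):
#   df = RFPDupeFilter()
#   df.request_seen(request)   # falsy on first sighting
#   df.request_seen(request)   # True for a duplicate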
| wcong/ants | ants/utils/dupefilter.py | Python | bsd-3-clause | 2,083 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 CERN
# Author: Pawel Szostek ([email protected])
#
# This file is part of Hdlmake.
#
# Hdlmake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hdlmake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hdlmake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from action.action import Action
from util import path
import fetch
def _convert_to_source_name(source_code):
if source_code == fetch.GIT:
return "git"
elif source_code == fetch.SVN:
return "svn"
elif source_code == fetch.LOCAL:
return "local"
elif source_code == fetch.GITSUBMODULE:
return "git_submodule"
class ListModules(Action):
def run(self):
if self.options.withfiles:
for m in self.modules_pool:
if not m.isfetched:
print("#!UNFETCHED")
print(m.url+'\n')
else:
print(path.relpath(m.path))
if m.source in [fetch.SVN, fetch.GIT]:
print("# "+m.url)
elif m.source == fetch.GITSUBMODULE:
print("# submodule of: %s" % m.parent.url)
if m.source in [fetch.SVN, fetch.GIT, fetch.LOCAL] and m.parent:
print("# parent: %s" % m.parent.url)
else:
print("# root module")
if not len(m.files):
print(" # no files")
else:
for f in m.files:
print(" " + path.relpath(f.path, m.path))
print("")
else:
print("#path\tsource")
for m in self.modules_pool:
print("%s\t%s" % (path.relpath(m.path), _convert_to_source_name(m.source)))
| keyru/hdl-make | hdlmake/action/list_modules.py | Python | gpl-3.0 | 2,363 |
from __future__ import absolute_import
from typing import Any, Callable, Dict, Generator, Iterable, List, Tuple
from django.test import TestCase
from django.template import loader
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.handlers import allocate_handler_id
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient,
)
from zerver.models import (
get_realm,
get_stream,
get_user_profile_by_email,
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
from six.moves import urllib
from contextlib import contextmanager
import six
API_KEYS = {} # type: Dict[str, str]
@contextmanager
def stub(obj, name, f):
# type: (Any, str, Callable[..., Any]) -> Generator[None, None, None]
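    # Illustrative usage (hypothetical names): with stub(mailer, 'send', fake_send):
    # code under test sees fake_send, and the original attribute is restored on exit.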
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
@contextmanager
def simulated_queue_client(client):
# type: (Any) -> Generator[None, None, None]
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client # type: ignore # https://github.com/JukkaL/mypy/issues/1152
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
# type: (List) -> Generator[None, None, None]
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
# type: () -> Generator[List[Tuple[str, str, str]], None, None]
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
# type: () -> Generator[List[Dict[str, str]], None, None]
'''
Allow a user to capture just the queries executed during
the with statement.
'''
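    # Illustrative usage (hypothetical endpoint and budget):
    #     with queries_captured() as queries:
    #         self.client.get('/json/messages')
    #     self.assert_length(queries, 20)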
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.execute = cursor_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = cursor_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
yield queries
TimeTrackingCursor.execute = old_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = old_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject(object):
pass
class DummyTornadoRequest(object):
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream() # type: ignore # monkey-patching here
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
allocate_handler_id(self)
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream(object):
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
self._log_data = {} # type: Dict[str, Any]
self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
# Helper because self.client.patch annoying requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.parse.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.parse.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.parse.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username': email, 'password': password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, six.string_types):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.get("/json/messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = get_realm(realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring, status_code=400):
self.assertIn(msg_substring, self.get_json_error(result, status_code=status_code))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action, file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = get_realm(resolve_email_to_domain(email))
stream = get_stream(stream_name, realm)
if stream is None:
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
        if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = self.get_last_message()
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
def get_last_message(self):
return Message.objects.latest('id')
def get_all_templates():
templates = []
relpath = os.path.relpath
isfile = os.path.isfile
path_exists = os.path.exists
is_valid_template = lambda p, n: not n.startswith('.') and isfile(p)
def process(template_dir, dirname, fnames):
for name in fnames:
path = os.path.join(dirname, name)
if is_valid_template(path, name):
templates.append(relpath(path, template_dir))
for engine in loader.engines.all():
template_dirs = [d for d in engine.template_dirs if path_exists(d)]
for template_dir in template_dirs:
template_dir = os.path.normpath(template_dir)
os.path.walk(template_dir, process, template_dir)
return templates
| peiwei/zulip | zerver/lib/test_helpers.py | Python | apache-2.0 | 14,518 |
import asyncio
from asyncio import ensure_future
from smartlink import Node, NodeServer
import sys
from pathlib import Path # if you haven't already done so
root = str(Path(__file__).resolve().parents[1])
sys.path.append(root)
from devices import alicat
def main():
node = Node("Flow Control Comupter")
# ports = ['COM1', 'COM3', 'COM8']
pcd = alicat.PCD()
node.add_device(pcd)
ensure_future(pcd.open_port('COM4'))
loop = asyncio.get_event_loop()
server = NodeServer(node, interval=0.2, loop=loop)
server.start()
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.close()
if __name__ == "__main__":
main()
| sgsdxzy/smartlink | bin/FlowControl.py | Python | gpl-3.0 | 707 |
from django.shortcuts import render_to_response
from models import *
from utils import *
import time
import pprint
import urllib2
import simplejson as json
def test(request):
return renderTextResponse("abc")
"""
-> mywalk.com/subscribe?guid=[string]&uri=[string]&extra=[string]
"""
def subscribe(request):
guid = idx(request.REQUEST, "guid")
uri = idx(request.REQUEST, "uri")
extra = idx(request.REQUEST, "extra")
    if guid is None or uri is None:
return renderJsonErrorResponse("Invalid request")
device = Device.get_by_key_name(guid)
# return renderJsonErrorResponse("Device already exists")
if device:
device.uri = uri
device.extra = extra
else:
device = Device(key_name = guid, guid = guid, uri = uri, extra = extra)
device.put()
return renderTextResponse("Successfully subscribe device : %s" % guid)
"""
-> logging?guid=[string]&text=[string]×tamp=[time]
"""
def logging(request):
guid = idx(request.REQUEST, "guid")
text = idx(request.REQUEST, "text")
timeStamp = idx(request.REQUEST, "timestamp")
    if guid is None or text is None or timeStamp is None:
return renderJsonErrorResponse("Invalid request")
device = Device.get_by_key_name(guid)
if not device:
return renderJsonErrorResponse("Device is not subscribed")
log = Logging(guid = guid, text = text, timeStamp = long(timeStamp))
log.put()
return renderTextResponse("Successfully add log")
"""
-> addSchedule?guid=[string]&xml_text=[string]&scheduleTime=[time]&type=[string]
(type = "IN_APP/ TILE/ TOAST")
"""
def addSchedule(request):
guid = idx(request.REQUEST, "guid")
xmlText = idx(request.REQUEST, "xml_text")
timeStamp = idx(request.REQUEST, "scheduleTime")
creationTime = idx(request.REQUEST, "creationTime")
type = idx(request.REQUEST, "type")
    if guid is None or xmlText is None or timeStamp is None or creationTime is None or type is None:
return renderJsonErrorResponse("Invalid request")
device = Device.get_by_key_name(guid)
if not device:
return renderJsonErrorResponse("Device is not subscribed")
schedule = Schedule(device = device,
xmlText = xmlText,
timeStamp = long(timeStamp),
creationTime = long(creationTime),
type = type)
schedule.put()
return renderTextResponse("Successfully add schedule")
"""
-> removeSchedule?guid=[string]&type=[string]
(remove all schedule with that type of that guid)
"""
def removeSchedule(request):
guid = idx(request.REQUEST, "guid")
type = idx(request.REQUEST, "type")
    up_to_time = idx(request.REQUEST, "upToTime")  # renamed so the local no longer shadows the time module
    if guid is None or type is None:
return renderJsonErrorResponse("Invalid request")
device = Device.get_by_key_name(guid)
if not device:
return renderJsonErrorResponse("Device is not subscribed")
query = Schedule.gql("WHERE device = :1 AND type = :2 AND creationTime < :3", device, type, time)
count = 0
for schedule in query:
count = count + 1
schedule.delete()
return renderTextResponse("Successfully removed %d schedules" % count)
def pushSchedule(schedule):
opener = urllib2.build_opener()
opener.addheaders = [('X-NotificationClass', '3')]
if schedule.type == 'TOAST' :
opener.addheaders = [('X-WindowsPhone-Target', 'toast'), ('X-NotificationClass', '2')]
elif schedule.type == 'TILE':
opener.addheaders = [('X-WindowsPhone-Target', 'token'), ('X-NotificationClass', '1')]
req = urllib2.Request(url = schedule.device.uri, data = schedule.xmlText)
assert req.get_method() == 'POST'
response = opener.open(req)
return response.code
def cron(request):
timeNow = long(time.time())
query = Schedule.gql("WHERE timeStamp < :1 ", timeNow)
count = 0
results = []
for schedule in query:
count = count + 1
firedSchedule = FiredSchedule(device = schedule.device,
xmlText = schedule.xmlText,
timeStamp = schedule.timeStamp,
creationTime = schedule.creationTime,
type = schedule.type)
try:
code = pushSchedule(schedule)
firedSchedule.returnCode = code
results.append('pushed: return code = ' + str(code))
        except Exception:  # keep pushing the remaining schedules even if one fails
results.append('exception thrown')
firedSchedule.returnCode = -1
pass
firedSchedule.put()
schedule.delete()
return renderTextResponse("Successfully pushed %d schedules <br /> %s" % (count, '<br />'.join(results)))
def nukedata(request):
for d in Device.all(keys_only=True):
db.delete(d)
for l in Logging.all(keys_only=True):
db.delete(l)
for s in Schedule.all(keys_only=True):
db.delete(s)
for s in FiredSchedule.all(keys_only=True):
db.delete(s)
return renderTextResponse("Nuked data")
def main(request):
DEVICE = "1. Device"
LOGGING = "2. Logging"
SCHEDULE = "3. Schedule"
FIREDSCHEDULE = "4. Fired Schedule"
ret = {"0. Time" : long(time.time()),
DEVICE : [], LOGGING : [], SCHEDULE : [], FIREDSCHEDULE : []}
for d in Device.all():
ret[DEVICE].append({"guid" : d.guid,
"uri" : d.uri,
"extra" : d.extra})
for l in Logging.all():
ret[LOGGING].append({"guid" : l.guid,
"text" : l.text,
"timestamp" : l.timeStamp})
for s in Schedule.all():
ret[SCHEDULE].append({"guid" : s.device.guid,
"xml" : s.xmlText,
"time" : s.timeStamp,
"creationTime" : s.creationTime,
"type" : s.type})
for s in FiredSchedule.all():
ret[FIREDSCHEDULE].append({"guid" : s.device.guid,
"xml" : s.xmlText,
"time" : s.timeStamp,
"creationTime" : s.creationTime,
"type" : s.type,
"returnCode" : s.returnCode})
return renderTextResponse("<pre>" + pprint.pformat(ret) + "</pre>")
| lugia/Python-MyWalk | base/views.py | Python | apache-2.0 | 6,682 |
import unittest
from worldengine.drawing_functions import draw_ancientmap, gradient, draw_rivers_on_image
from worldengine.world import World
from tests.draw_test import TestBase, PixelCollector
class TestDrawingFunctions(TestBase):
def setUp(self):
super(TestDrawingFunctions, self).setUp()
self.w = World.open_protobuf("%s/seed_28070.world" % self.tests_data_dir)
def test_draw_ancient_map_factor1(self):
w_large = World.from_pickle_file("%s/seed_48956.world" % self.tests_data_dir)
target = PixelCollector(w_large.width, w_large.height)
draw_ancientmap(w_large, target, resize_factor=1)
self._assert_img_equal("ancientmap_48956", target)
def test_draw_ancient_map_factor3(self):
target = PixelCollector(self.w.width * 3, self.w.height * 3)
draw_ancientmap(self.w, target, resize_factor=3)
self._assert_img_equal("ancientmap_28070_factor3", target)
def test_gradient(self):
self._assert_are_colors_equal((10, 20, 40),
gradient(0.0, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
self._assert_are_colors_equal((0, 128, 240),
gradient(1.0, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
self._assert_are_colors_equal((5, 74, 140),
gradient(0.5, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
def test_draw_rivers_on_image(self):
target = PixelCollector(self.w.width * 2, self.w.height * 2)
draw_rivers_on_image(self.w, target, factor=2)
self._assert_img_equal("rivers_28070_factor2", target)
if __name__ == '__main__':
unittest.main()
| tmfoltz/worldengine | tests/drawing_functions_test.py | Python | mit | 1,681 |
# run tests with
# python -m unittest
# this file only exists to make this directory discoverable by unittest
# you must have MongoDB installed locally to run Mango's tests
| JoshKarpel/mango | tests/__init__.py | Python | mit | 175 |
import io
import re
import base64 as b64
import datetime
from typing import List
from typing import Optional
from typing import Callable
from typing import Any
from collections import OrderedDict
import numpy as np
from django.urls import reverse
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.http.request import HttpRequest
from django.shortcuts import render
from django.template import loader
from django.db.models import Q
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.db.models import QuerySet
from manager.exceptions import VoltPyNotAllowed
from manager.exceptions import VoltPyFailed
import manager
import manager.plotmanager as mpm
import manager.forms as mforms
import manager.models as mmodels
from manager.helpers.decorators import with_user
def voltpy_render(*args, template_name: str, **kwargs) -> HttpResponse:
"""
    This is a proxy function that sets the usually-needed context elements.
    It is preferred over the Django default render.
    WARNING: it removes extra spaces, EOLs and tabs.
"""
request = kwargs['request']
context = kwargs.pop('context', {})
con_scr = context.get('scripts', '')
accepted_cookies = request.session.get('accepted_cookies', False)
scr = ''.join([
"\n",
con_scr,
])
context['bokeh_scripts'] = scr
context['accepted_cookies'] = accepted_cookies
# context['plot_width'] = mpm.PlotManager.plot_width
# context['plot_height'] = mpm.PlotManager.plot_height
notifications = request.session.pop('VOLTPY_notification', [])
template = loader.get_template(template_name=template_name)
if len(notifications) > 0:
con_note = context.get('notifications', [])
con_note.extend(notifications)
context['notifications'] = con_note
render_str = template.render(request=request, context=context)
render_str = render_str.replace('\t', ' ')
render_str = re.sub(' +', ' ', render_str)
render_str = re.sub(r'\n ', r'\n', render_str)
render_str = re.sub(r'\n+', r'\n', render_str)
return HttpResponse(render_str)
def voltpy_serve_csv(request: HttpRequest, filedata: io.StringIO, filename: str) -> HttpResponse:
from django.utils.encoding import smart_str
response = render(
request=request,
template_name='manager/export.html',
context={'data': filedata.getvalue()}
)
filename = filename.strip()
filename = filename.replace(' ', '_')
filename = "".join(x for x in filename if (x.isalnum() or x in ('_', '+', '-', '.', ',')))
response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(filename)
return response
def add_notification(request: HttpRequest, text: str, severity: int=0) -> None:
now = datetime.datetime.now().strftime('%H:%M:%S')
notifications = request.session.get('VOLTPY_notification', [])
notifications.append({'text': ''.join([now, ': ', text]), 'severity': severity})
request.session['VOLTPY_notification'] = notifications
def delete_helper(
request: HttpRequest,
user: User,
    item: Any,
delete_fun: Optional[Callable]=None,
onSuccessRedirect: Optional[str]=None
) -> HttpResponse:
"""
    A generic helper that offers the ability to delete a model
    instance after user confirmation.
"""
if item is None:
return HttpResponseRedirect(
reverse('index')
)
if request.method == 'POST':
form = mforms.DeleteForm(item, request.POST)
if form.is_valid():
a = form.process(item, delete_fun)
if a:
if onSuccessRedirect is not None:
return HttpResponseRedirect(
onSuccessRedirect
)
return HttpResponseRedirect(
reverse('index')
)
else:
form = mforms.DeleteForm(item)
context = {
'form': form,
'item': item,
'user': user
}
return voltpy_render(
request=request,
template_name='manager/deleteGeneric.html',
context=context
)
def generate_plot(
request: HttpRequest,
user: User,
to_plot: Optional[str]=None,
plot_type: Optional[str]=None,
value_id: Optional[int]=None,
**kwargs
) -> List:
assert (to_plot is not None and plot_type is None) or (to_plot is None and plot_type is not None)
if to_plot is not None:
if isinstance(to_plot, mmodels.File):
plot_type = 'file'
elif isinstance(to_plot, mmodels.Dataset):
plot_type = 'dataset'
elif isinstance(to_plot, mmodels.Fileset):
plot_type = 'fileset'
elif isinstance(to_plot, mmodels.Analysis):
plot_type = 'analysis'
else:
raise VoltPyFailed('Could not plot')
allowedTypes = [
'file',
'analysis',
'dataset',
'fileset',
]
if plot_type not in allowedTypes:
raise VoltPyNotAllowed('Operation not allowed.')
vtype = kwargs.get('vtype', plot_type)
vid = kwargs.get('vid', value_id)
addTo = kwargs.get('add', None)
pm = mpm.PlotManager()
data = []
if plot_type == 'file':
if to_plot is None:
cf = mmodels.File.get(id=value_id)
else:
cf = to_plot
data = pm.datasetHelper(user, cf)
pm.xlabel = pm.xLabelHelper(user)
pm.include_x_switch = True
elif plot_type == 'dataset':
if to_plot is None:
cs = mmodels.Dataset.get(id=value_id)
else:
cs = to_plot
data = pm.datasetHelper(user, cs)
pm.xlabel = pm.xLabelHelper(user)
pm.include_x_switch = True
elif plot_type == 'analysis':
if to_plot is None:
data = pm.analysisHelper(user, value_id)
else:
data = to_plot
pm.xlabel = pm.xLabelHelper(user)
pm.include_x_switch = False
elif plot_type == 'fileset':
if to_plot is None:
fs = mmodels.Fileset.get(id=value_id)
else:
fs = to_plot
data = []
for f in fs.files.all():
data.extend(pm.datasetHelper(user, f))
pm.xlabel = pm.xLabelHelper(user)
pm.include_x_switch = True
pm.ylabel = 'i / µA'
pm.setInteraction(kwargs.get('interactionName', 'none'))
for d in data:
pm.add(**d)
if addTo:
for a in addTo:
pm.add(**a)
return pm.getEmbeded(request, user, vtype, vid)
def is_number(s: Any) -> bool:
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def form_helper(
request: HttpRequest,
user: User,
formClass: Callable,
submitName: str='formSubmit',
submitText: str='Submit',
formExtraData: dict={},
formTemplate: str='manager/form_inline.html'
) -> dict:
if request.method == 'POST':
if submitName in request.POST:
formInstance = formClass(request.POST, **formExtraData)
if formInstance.is_valid():
formInstance.process(user, request)
else:
formInstance = formClass(**formExtraData)
else:
formInstance = formClass(**formExtraData)
loadedTemplate = loader.get_template(formTemplate)
form_context = {
'form': formInstance,
'submit': submitName,
'submit_text': submitText,
}
form_txt = loadedTemplate.render(
context=form_context,
request=request
)
return {'html': form_txt, 'instance': formInstance}
def get_redirect_class(redirectUrl: str) -> str:
ret = '_voltJS_urlChanger _voltJS_url@%s'
return ret % b64.b64encode(redirectUrl.encode()).decode('UTF-8')
def check_dataset_integrity(dataset: mmodels.Dataset, params_to_check: List[int]) -> None:
if len(dataset.curves_data.all()) < 2:
return
cd1 = dataset.curves_data.all()[0]
for cd in dataset.curves_data.all():
for p in params_to_check:
if cd.curve.params[p] != cd1.curve.params[p]:
raise VoltPyFailed('All curves in dataset have to be similar.')
def send_change_mail(user: User) -> bool:
subject = 'Confirm voltammetry.center new email address'
message = loader.render_to_string('mails/confirm_new_email.html', {
'user': user,
'domain': Site.objects.get_current(),
'uid': user.id,
'token': user.profile.new_email_confirmation_hash,
})
return send_mail(subject, message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[user.profile.new_email])
def generate_share_link(user: User, perm: str, obj: Any) -> str:
import random
import string
if obj.owner != user:
raise VoltPyNotAllowed()
try:
old = manager.models.SharedLink.get(
object_type=obj.__class__.__name__,
object_id=obj.id,
permissions=perm
)
return old.getLink()
    except Exception:  # no existing link found; fall through and create one
pass
gen_string = ''.join([
random.choice(string.ascii_letters + string.digits) for _ in range(32)
])
while manager.models.SharedLink.objects.filter(link=gen_string).exists():
gen_string = ''.join([
random.choice(string.ascii_letters + string.digits) for _ in range(32)
])
sl = manager.models.SharedLink(
object_type=obj.__class__.__name__,
object_id=obj.id,
permissions=perm,
link=gen_string
)
sl.save()
return sl.getLink()
def paginate(request: HttpRequest, queryset: QuerySet, sortable_by: List[str], current_page: int) -> str:
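    # Overview of the steps below: filter `queryset` by the 'search' request
    # parameter, order it by a whitelisted 'sort' column, then return the
    # requested page_size slice together with rendered paginator HTML.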
page_size = 15
path = request.path
txt_sort = ''
search_string = ''
if request.method == 'POST':
search_string = request.POST.get('search', '')
if request.method == 'GET':
search_string = request.GET.get('search', '')
if search_string:
dbquery = Q(name__icontains=search_string)
if 'filename' in sortable_by:
dbquery |= Q(filename=search_string)
if 'dataset' in sortable_by:
dbquery |= Q(dataset__name__icontains=search_string)
if 'analytes' in sortable_by:
dbquery |= Q(analytes__name__icontains=search_string)
if 'method' in sortable_by:
dbquery |= Q(method_display_name__icontains=search_string)
queryset = queryset.filter(dbquery)
if request.method in ['GET', 'POST']:
if request.GET.get('sort', False):
sort_by = request.GET.get('sort')
if sort_by in sortable_by:
if sort_by == 'analytes':
from django.db.models import Min
order_by = sort_by
txt_sort = '?sort=%s' % sort_by
queryset = queryset.annotate(an_name=Min('analytes__name')).order_by('an_name')
elif sort_by == 'dataset':
order_by = sort_by
txt_sort = '?sort=%s' % sort_by
queryset = queryset.order_by('dataset__name')
else:
order_by = sort_by
txt_sort = '?sort=%s' % sort_by
queryset = queryset.order_by(order_by)
else:
queryset = queryset.order_by('-id')
else:
raise VoltPyFailed('Error.')
splpath = path.split('/')
if is_number(splpath[-2]):
path = '/'.join(splpath[:-2])
path += '/'
ret = {}
elements = len(queryset)
ret['number_of_pages'] = int(np.ceil(elements / page_size))
if current_page <= 0 or current_page > ret['number_of_pages']:
# TODO: Log wrong page number
current_page = 1
start = (current_page - 1) * page_size
end = start + page_size
ret['search_append'] = ''
if search_string != '':
from urllib.parse import quote
sanitize_search = quote(search_string)
ret['search_append'] = '&search=' + sanitize_search
if not txt_sort:
txt_sort = '?search=%s' % sanitize_search
else:
txt_sort += '&search=%s' % sanitize_search
items_count = len(queryset)
ret['current_page_content'] = queryset[start:end:1]
search_url = request.get_full_path()
if 'search=' in search_url:
try:
search_url = search_url[:search_url.index('&search=')]
        except ValueError:  # str.index raises ValueError when '&search=' is absent
search_url = search_url[:search_url.index('search=')]
ret['search_url'] = search_url
ret['paginator'] = ''.join([
'<div class="paginator">',
'<a href="%s1/%s">[<<]</a> ' % (path, txt_sort),
'<a href="%s%s/%s">[<]</a> ' % (path, str(current_page - 1) if (current_page > 1) else "1", txt_sort),
])
for i in range(ret['number_of_pages']):
p = str(i + 1)
if int(p) == current_page:
ret['paginator'] += '[{num}] '.format(num=p)
else:
ret['paginator'] += '<a href="{path}{num}/{sort}">[{num}]</a> '.format(
path=path,
num=p,
sort=txt_sort
)
    search_string = search_string.replace('<', '&lt;').replace('>', '&gt;')
ret['search_results_for'] = (('<span class="css_search">Search results for <i>%s</i>:</span><br />' % search_string) if search_string else '')
ret['paginator'] += ''.join([
'<a href="%s%s/%s">[>]</a> ' % (path, str(current_page + 1) if (current_page < ret['number_of_pages']) else str(ret['number_of_pages']), txt_sort),
'<a href="%s%s/%s">[>>]</a>' % (path, str(ret['number_of_pages']), txt_sort),
' %d items out of %s ' % (len(ret['current_page_content']), items_count),
'</div>'
])
return ret
def get_user() -> User:
return with_user._user
def export_curves_data_as_csv(cds: List[mmodels.CurveData]) -> io.StringIO:
"""
Turn a list of CurveData instances into CSV file.
"""
cdict = {}
explen = len(cds)
for i, cd in enumerate(cds):
for x, y in zip(cd.xVector, cd.yVector):
tmp = cdict.get(x, [None] * explen)
tmp[i] = y
cdict[x] = tmp
sortdict = OrderedDict(sorted(cdict.items()))
xcol = np.array(list(sortdict.keys())).reshape((-1, 1))
ycols = np.array(list(sortdict.values()))
allCols = np.concatenate((xcol, ycols), axis=1)
memoryFile = io.StringIO()
np.savetxt(memoryFile, allCols, delimiter=",", newline="\r\n", fmt='%s')
return memoryFile
| efce/voltPy | manager/helpers/functions.py | Python | gpl-3.0 | 14,859 |
from gwt.ui.FocusListener import (
DOM,
Event,
FOCUS_EVENTS,
FocusHandler,
fireFocusEvent,
)
| anandology/pyjamas | library/pyjamas/ui/FocusListener.py | Python | apache-2.0 | 113 |
import unittest
from context import html2md
from assertions import assertEq
class PreTest(unittest.TestCase):
def test_default(self):
in_html = u'''
<pre>
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
</pre>'''
out_md = u'''
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)'''
assertEq(out_md, html2md.html2md(in_html))
def test_default_with_attributes(self):
in_html = u'''
<pre class="code python literal-block">
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
</pre>'''
out_md = u'''
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)'''
assertEq(out_md, html2md.html2md(in_html))
def test_default_with_code(self):
in_html = u'''
<pre><code>
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
</code></pre>'''
out_md = u'''
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)'''
assertEq(out_md, html2md.html2md(in_html))
def test_github_with_attributes(self):
in_html = u'''
<p>A code sample:</p>
<pre class="code python literal-block">
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
</pre>'''
out_md = u'''
A code sample:
```code python literal-block
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
```'''
assertEq(out_md, html2md.html2md(in_html, fenced_code='github'))
class PygmentsPreTest(unittest.TestCase):
def atest_pygments(self):
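        # Note (assumed intent): the 'atest_' prefix keeps unittest from
        # collecting this case; rename to 'test_pygments' to run it.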
in_html = u'''
<pre class="code python literal-block">
<span class="kn">from</span> <span class="nn">BeautifulSoup</span> <span class="kn">import</span> <span class="n">BeautifulSoup</span>
<span class="kn">import</span> <span class="nn">urllib2</span>
<span class="n">soup</span> <span class="o">=</span> <span class="n">BeautifulSoup</span><span class="p">(</span><span class="n">urllib2</span><span class="o">.</span><span class="n">urlopen</span><span class="p">(</span><span class="s">'http://java.sun.com'</span><span class="p">)</span><span class="o">.</span><span class="n">read</span><span class="p">())</span>
<span class="n">menu</span> <span class="o">=</span> <span class="n">soup</span><span class="o">.</span><span class="n">findAll</span><span class="p">(</span><span class="s">'div'</span><span class="p">,</span><span class="n">attrs</span><span class="o">=</span><span class="p">{</span><span class="s">'class'</span><span class="p">:</span><span class="s">'pad'</span><span class="p">})</span>
<span class="k">for</span> <span class="n">subMenu</span> <span class="ow">in</span> <span class="n">menu</span><span class="p">:</span>
<span class="n">links</span> <span class="o">=</span> <span class="n">subMenu</span><span class="o">.</span><span class="n">findAll</span><span class="p">(</span><span class="s">'a'</span><span class="p">)</span>
<span class="k">for</span> <span class="n">link</span> <span class="ow">in</span> <span class="n">links</span><span class="p">:</span>
<span class="k">print</span> <span class="s">"</span><span class="si">%s</span><span class="s"> : </span><span class="si">%s</span><span class="s">"</span> <span class="o">%</span> <span class="p">(</span><span class="n">link</span><span class="o">.</span><span class="n">string</span><span class="p">,</span> <span class="n">link</span><span class="p">[</span><span class="s">'href'</span><span class="p">])</span>
</pre>'''
out_md = u'''
from BeautifulSoup import BeautifulSoup
import urllib2
soup = BeautifulSoup(urllib2.urlopen('http://java.sun.com').read())
menu = soup.findAll('div',attrs={'class':'pad'})
for subMenu in menu:
links = subMenu.findAll('a')
for link in links:
print "%s : %s" % (link.string, link['href'])
'''
assertEq(out_md, html2md.html2md(in_html))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(PreTest)
if __name__ == '__main__':
unittest.main()
| al3xandru/html2md | tests/test_pre.py | Python | bsd-2-clause | 6,782 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='iotly',
version='0.1.0',
description='IOT node based on Raspberry Pi GPIO',
long_description=readme,
author='Simone Notargiacomo',
author_email='[email protected]',
url='https://github.com/nottix/iotly',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
| nottix/iotly | setup.py | Python | bsd-2-clause | 503 |
import struct
from Ensemble.Ensemble import Ensemble
from log import logger
from datetime import datetime
class EnsembleData:
"""
Ensemble Data DataSet.
Integer values that give details about the ensemble.
"""
def __init__(self, num_elements, element_multiplier):
self.ds_type = 10
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000008"
self.EnsembleNumber = 0
self.NumBins = 0
self.NumBeams = 0
self.DesiredPingCount = 0
self.ActualPingCount = 0
self.SerialNumber = ""
self.SysFirmwareMajor = ""
self.SysFirmwareMinor = ""
self.SysFirmwareRevision = ""
self.SysFirmwareSubsystemCode = ""
self.SubsystemConfig = ""
self.Status = 0
self.Year = 0
self.Month = 0
self.Day = 0
self.Hour = 0
self.Minute = 0
self.Second = 0
self.HSec = 0
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the values.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
self.EnsembleNumber = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 0, Ensemble().BytesInInt32, data)
self.NumBins = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 1, Ensemble().BytesInInt32, data)
self.NumBeams = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 2, Ensemble().BytesInInt32, data)
self.DesiredPingCount = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 3, Ensemble().BytesInInt32, data)
self.ActualPingCount = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 4, Ensemble().BytesInInt32, data)
self.Status = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 5, Ensemble().BytesInInt32, data)
self.Year = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 6, Ensemble().BytesInInt32, data)
self.Month = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 7, Ensemble().BytesInInt32, data)
self.Day = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 8, Ensemble().BytesInInt32, data)
self.Hour = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 9, Ensemble().BytesInInt32, data)
self.Minute = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 10, Ensemble().BytesInInt32, data)
self.Second = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 11, Ensemble().BytesInInt32, data)
self.HSec = Ensemble.GetInt32(packet_pointer + Ensemble().BytesInInt32 * 12, Ensemble().BytesInInt32, data)
self.SerialNumber = str(data[packet_pointer+Ensemble().BytesInInt32*13:packet_pointer+Ensemble().BytesInInt32*21], "UTF-8")
self.SysFirmwareRevision = struct.unpack("B", data[packet_pointer+Ensemble().BytesInInt32*21 + 0:packet_pointer+Ensemble().BytesInInt32*21 + 1])[0]
self.SysFirmwareMinor = struct.unpack("B", data[packet_pointer+Ensemble().BytesInInt32*21 + 1:packet_pointer+Ensemble().BytesInInt32*21 + 2])[0]
self.SysFirmwareMajor = struct.unpack("B", data[packet_pointer + Ensemble().BytesInInt32 * 21 + 2:packet_pointer + Ensemble().BytesInInt32 * 21 + 3])[0]
self.SysFirmwareSubsystemCode = str(data[packet_pointer + Ensemble().BytesInInt32 * 21 + 3:packet_pointer + Ensemble().BytesInInt32 * 21 + 4], "UTF-8")
self.SubsystemConfig = struct.unpack("B", data[packet_pointer + Ensemble().BytesInInt32 * 22 + 3:packet_pointer + Ensemble().BytesInInt32 * 22 + 4])[0]
logger.debug(self.EnsembleNumber)
logger.debug(str(self.Month) + "/" + str(self.Day) + "/" + str(self.Year) + " " + str(self.Hour) + ":" + str(self.Minute) + ":" + str(self.Second) + "." + str(self.HSec))
logger.debug(self.SerialNumber)
logger.debug(str(self.SysFirmwareMajor) + "." + str(self.SysFirmwareMinor) + "." + str(self.SysFirmwareRevision) + "-" + str(self.SysFirmwareSubsystemCode))
logger.debug(self.SubsystemConfig)
def datetime_str(self):
"""
Return the date and time as a string.
:return: Date time string. 2013/07/30 21:00:00.00
"""
return str(self.Year).zfill(4) + "/" + str(self.Month).zfill(2) + "/" + str(self.Day).zfill(2) + " " + str(self.Hour).zfill(2) + ":" + str(self.Minute).zfill(2) + ":" + str(self.Second).zfill(2) + "." + str(self.HSec).zfill(2)
def datetime(self):
        return datetime(self.Year, self.Month, self.Day, self.Hour, self.Minute, self.Second, self.HSec * 10000)  # assumes HSec is hundredths of a second -> microseconds
def firmware_str(self):
return "{0}.{1}.{2} - {3}".format(self.SysFirmwareMajor, self.SysFirmwareMinor, self.SysFirmwareRevision, self.SysFirmwareSubsystemCode) | ricorx7/rti-python | Ensemble/EnsembleData.py | Python | bsd-3-clause | 4,924 |